From 36d22d82aa202bb199967e9512281e9a53db42c9 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 21:33:14 +0200
Subject: Adding upstream version 115.7.0esr.

Signed-off-by: Daniel Baumann
---
 testing/web-platform/tests/webdriver/META.yml | 9 + testing/web-platform/tests/webdriver/README.md | 17 + .../web-platform/tests/webdriver/tests/__init__.py | 4 + .../tests/webdriver/tests/bidi/__init__.py | 80 ++ .../tests/bidi/browsing_context/__init__.py | 65 ++ .../capture_screenshot/__init__.py | 21 + .../capture_screenshot/capture_screenshot.py | 36 + .../browsing_context/capture_screenshot/frame.py | 58 ++ .../browsing_context/capture_screenshot/invalid.py | 26 + .../browsing_context/classic_interop/__init__.py | 0 .../classic_interop/window_handle.py | 7 + .../tests/bidi/browsing_context/close/__init__.py | 0 .../tests/bidi/browsing_context/close/close.py | 23 + .../tests/bidi/browsing_context/close/invalid.py | 31 + .../browsing_context/context_created/__init__.py | 0 .../context_created/context_created.py | 225 +++++ .../tests/bidi/browsing_context/create/__init__.py | 0 .../tests/bidi/browsing_context/create/invalid.py | 53 ++ .../browsing_context/create/reference_context.py | 46 ++ .../tests/bidi/browsing_context/create/type.py | 41 + .../dom_content_loaded/__init__.py | 0 .../dom_content_loaded/dom_content_loaded.py | 148 ++++ .../bidi/browsing_context/get_tree/__init__.py | 0 .../tests/bidi/browsing_context/get_tree/frames.py | 90 ++ .../bidi/browsing_context/get_tree/invalid.py | 27 + .../bidi/browsing_context/get_tree/max_depth.py | 121 +++ .../tests/bidi/browsing_context/get_tree/root.py | 113 +++ .../tests/bidi/browsing_context/load/__init__.py | 0 .../tests/bidi/browsing_context/load/load.py | 137 ++++ .../bidi/browsing_context/navigate/__init__.py | 25 + .../bidi/browsing_context/navigate/about_blank.py | 33 + .../bidi/browsing_context/navigate/data_url.py | 101 +++ .../tests/bidi/browsing_context/navigate/error.py | 22 + .../tests/bidi/browsing_context/navigate/frame.py | 59 ++ .../tests/bidi/browsing_context/navigate/hash.py | 62 ++ .../tests/bidi/browsing_context/navigate/image.py | 56 ++ .../bidi/browsing_context/navigate/invalid.py | 52 ++ .../bidi/browsing_context/navigate/navigate.py | 83 ++ .../navigate/support/black_dot.png | Bin 0 -> 70 bytes .../browsing_context/navigate/support/empty.html | 0 .../browsing_context/navigate/support/empty.js | 1 + .../browsing_context/navigate/support/empty.svg | 2 + .../browsing_context/navigate/support/other.html | 0 .../browsing_context/navigate/support/other.svg | 3 + .../browsing_context/navigate/support/red_dot.png | Bin 0 -> 95 bytes .../tests/bidi/browsing_context/navigate/wait.py | 98 +++ .../tests/bidi/browsing_context/print/__init__.py | 0 .../bidi/browsing_context/print/background.py | 56 ++ .../tests/bidi/browsing_context/print/context.py | 61 ++ .../tests/bidi/browsing_context/print/invalid.py | 197 +++++ .../tests/bidi/browsing_context/print/margin.py | 148 ++++ .../bidi/browsing_context/print/orientation.py | 43 + .../tests/bidi/browsing_context/print/page.py | 39 + .../bidi/browsing_context/print/page_ranges.py | 131 +++ .../tests/bidi/browsing_context/print/scale.py | 57 ++ .../bidi/browsing_context/print/shrink_to_fit.py | 50 ++ .../tests/bidi/browsing_context/reload/invalid.py | 37 + .../tests/webdriver/tests/bidi/errors/__init__.py | 0 .../tests/webdriver/tests/bidi/errors/errors.py | 16 + .../tests/webdriver/tests/bidi/input/__init__.py | 38 + .../tests/webdriver/tests/bidi/input/conftest.py | 46 ++
.../tests/bidi/input/perform_actions/__init__.py | 64 ++ .../tests/bidi/input/perform_actions/invalid.py | 231 ++++++ .../tests/bidi/input/perform_actions/key.py | 54 ++ .../tests/bidi/input/perform_actions/key_events.py | 271 ++++++ .../bidi/input/perform_actions/key_modifier.py | 163 ++++ .../bidi/input/perform_actions/pointer_mouse.py | 254 ++++++ .../perform_actions/pointer_mouse_modifier.py | 242 ++++++ .../perform_actions/pointer_mouse_multiclick.py | 125 +++ .../bidi/input/perform_actions/pointer_origin.py | 140 ++++ .../bidi/input/perform_actions/pointer_pen.py | 73 ++ .../bidi/input/perform_actions/pointer_touch.py | 150 ++++ .../tests/bidi/input/perform_actions/wheel.py | 81 ++ .../tests/bidi/input/release_actions/__init__.py | 0 .../tests/bidi/input/release_actions/context.py | 42 + .../tests/bidi/input/release_actions/invalid.py | 16 + .../tests/bidi/input/release_actions/release.py | 28 + .../tests/bidi/input/release_actions/sequence.py | 82 ++ .../tests/webdriver/tests/bidi/log/__init__.py | 0 .../tests/bidi/log/entry_added/__init__.py | 129 +++ .../tests/bidi/log/entry_added/console.py | 170 ++++ .../tests/bidi/log/entry_added/console_args.py | 271 ++++++ .../tests/bidi/log/entry_added/event_buffer.py | 97 +++ .../tests/bidi/log/entry_added/javascript.py | 31 + .../webdriver/tests/bidi/log/entry_added/realm.py | 32 + .../tests/bidi/log/entry_added/stacktrace.py | 121 +++ .../tests/bidi/log/entry_added/subscription.py | 110 +++ .../tests/webdriver/tests/bidi/network/__init__.py | 221 +++++ .../bidi/network/before_request_sent/__init__.py | 0 .../before_request_sent/before_request_sent.py | 283 +++++++ .../tests/bidi/network/combined/__init__.py | 0 .../tests/bidi/network/combined/network_events.py | 121 +++ .../tests/webdriver/tests/bidi/network/conftest.py | 98 +++ .../bidi/network/response_completed/__init__.py | 0 .../response_completed/response_completed.py | 264 ++++++ .../response_completed_cached.py | 196 +++++ .../bidi/network/response_started/__init__.py | 0 .../network/response_started/response_started.py | 241 ++++++ .../response_started/response_started_cached.py | 204 +++++ .../tests/bidi/network/support/empty.html | 2 + .../webdriver/tests/bidi/network/support/empty.js | 1 + .../webdriver/tests/bidi/network/support/empty.png | Bin 0 -> 72 bytes .../webdriver/tests/bidi/network/support/empty.svg | 1 + .../webdriver/tests/bidi/network/support/empty.txt | 1 + .../bidi/network/support/redirect_http_equiv.html | 4 + .../tests/bidi/network/support/redirected.html | 2 + .../tests/webdriver/tests/bidi/script/__init__.py | 62 ++ .../bidi/script/add_preload_script/__init__.py | 0 .../add_preload_script/add_preload_script.py | 172 ++++ .../bidi/script/add_preload_script/arguments.py | 236 ++++++ .../bidi/script/add_preload_script/invalid.py | 194 +++++ .../bidi/script/add_preload_script/sandbox.py | 70 ++ .../tests/bidi/script/call_function/__init__.py | 0 .../tests/bidi/script/call_function/arguments.py | 678 +++++++++++++++ .../bidi/script/call_function/await_promise.py | 48 ++ .../bidi/script/call_function/exception_details.py | 211 +++++ .../script/call_function/function_declaration.py | 14 + .../tests/bidi/script/call_function/internal_id.py | 67 ++ .../tests/bidi/script/call_function/invalid.py | 423 ++++++++++ .../bidi/script/call_function/invalid_tentative.py | 38 + .../tests/bidi/script/call_function/realm.py | 71 ++ .../tests/bidi/script/call_function/result.py | 161 ++++ .../tests/bidi/script/call_function/result_node.py | 656 +++++++++++++++ 
.../bidi/script/call_function/result_ownership.py | 60 ++ .../tests/bidi/script/call_function/sandbox.py | 239 ++++++ .../script/call_function/serialization_options.py | 444 ++++++++++ .../tests/bidi/script/call_function/strict_mode.py | 38 + .../tests/bidi/script/call_function/this.py | 147 ++++ .../tests/bidi/script/classic_interop/__init__.py | 0 .../bidi/script/classic_interop/node_shared_id.py | 101 +++ .../tests/webdriver/tests/bidi/script/conftest.py | 67 ++ .../webdriver/tests/bidi/script/disown/__init__.py | 0 .../webdriver/tests/bidi/script/disown/handles.py | 173 ++++ .../webdriver/tests/bidi/script/disown/invalid.py | 68 ++ .../tests/bidi/script/disown/invalid_tentative.py | 35 + .../webdriver/tests/bidi/script/disown/target.py | 95 +++ .../tests/bidi/script/evaluate/__init__.py | 1 + .../tests/bidi/script/evaluate/await_promise.py | 220 +++++ .../tests/bidi/script/evaluate/evaluate.py | 95 +++ .../bidi/script/evaluate/exception_details.py | 212 +++++ .../tests/bidi/script/evaluate/internal_id.py | 65 ++ .../tests/bidi/script/evaluate/invalid.py | 153 ++++ .../bidi/script/evaluate/invalid_tentative.py | 38 + .../webdriver/tests/bidi/script/evaluate/result.py | 141 ++++ .../tests/bidi/script/evaluate/result_node.py | 644 +++++++++++++++ .../tests/bidi/script/evaluate/result_ownership.py | 60 ++ .../tests/bidi/script/evaluate/sandbox.py | 199 +++++ .../bidi/script/evaluate/serialization_options.py | 444 ++++++++++ .../tests/bidi/script/evaluate/strict_mode.py | 34 + .../tests/bidi/script/get_realms/__init__.py | 0 .../tests/bidi/script/get_realms/context.py | 70 ++ .../tests/bidi/script/get_realms/get_realms.py | 183 +++++ .../tests/bidi/script/get_realms/invalid.py | 26 + .../tests/bidi/script/get_realms/sandbox.py | 238 ++++++ .../webdriver/tests/bidi/script/get_realms/type.py | 34 + .../tests/bidi/script/message/__init__.py | 0 .../webdriver/tests/bidi/script/message/message.py | 101 +++ .../bidi/script/remove_preload_script/__init__.py | 0 .../bidi/script/remove_preload_script/invalid.py | 15 + .../remove_preload_script/remove_preload_script.py | 80 ++ .../bidi/script/remove_preload_script/sandbox.py | 42 + .../tests/webdriver/tests/bidi/session/__init__.py | 0 .../webdriver/tests/bidi/session/new/__init__.py | 0 .../webdriver/tests/bidi/session/new/connect.py | 34 + .../tests/bidi/session/status/__init__.py | 0 .../webdriver/tests/bidi/session/status/status.py | 11 + .../tests/bidi/session/subscribe/__init__.py | 0 .../tests/bidi/session/subscribe/contexts.py | 277 +++++++ .../tests/bidi/session/subscribe/events.py | 138 ++++ .../tests/bidi/session/subscribe/invalid.py | 156 ++++ .../tests/bidi/session/unsubscribe/__init__.py | 0 .../tests/bidi/session/unsubscribe/contexts.py | 167 ++++ .../tests/bidi/session/unsubscribe/events.py | 83 ++ .../tests/bidi/session/unsubscribe/invalid.py | 234 ++++++ .../tests/classic/accept_alert/__init__.py | 0 .../webdriver/tests/classic/accept_alert/accept.py | 110 +++ .../webdriver/tests/classic/add_cookie/__init__.py | 0 .../webdriver/tests/classic/add_cookie/add.py | 286 +++++++ .../tests/classic/add_cookie/user_prompts.py | 137 ++++ .../tests/webdriver/tests/classic/back/__init__.py | 0 .../tests/webdriver/tests/classic/back/back.py | 169 ++++ .../tests/webdriver/tests/classic/back/conftest.py | 19 + .../webdriver/tests/classic/back/user_prompts.py | 118 +++ .../tests/classic/close_window/__init__.py | 0 .../webdriver/tests/classic/close_window/close.py | 102 +++ .../tests/classic/close_window/user_prompts.py | 119 +++ 
.../tests/classic/delete_all_cookies/__init__.py | 0 .../tests/classic/delete_all_cookies/delete.py | 22 + .../classic/delete_all_cookies/user_prompts.py | 119 +++ .../tests/classic/delete_cookie/__init__.py | 0 .../tests/classic/delete_cookie/delete.py | 29 + .../tests/classic/delete_cookie/user_prompts.py | 119 +++ .../tests/classic/delete_session/__init__.py | 0 .../tests/classic/delete_session/delete.py | 42 + .../tests/classic/dismiss_alert/__init__.py | 0 .../tests/classic/dismiss_alert/dismiss.py | 109 +++ .../tests/classic/element_clear/__init__.py | 0 .../webdriver/tests/classic/element_clear/clear.py | 454 +++++++++++ .../tests/classic/element_clear/user_prompts.py | 131 +++ .../tests/classic/element_click/__init__.py | 0 .../tests/classic/element_click/bubbling.py | 157 ++++ .../tests/classic/element_click/center_point.py | 64 ++ .../webdriver/tests/classic/element_click/click.py | 99 +++ .../tests/classic/element_click/events.py | 35 + .../tests/classic/element_click/file_upload.py | 16 + .../tests/classic/element_click/interactability.py | 130 +++ .../tests/classic/element_click/navigate.py | 198 +++++ .../classic/element_click/scroll_into_view.py | 72 ++ .../tests/classic/element_click/select.py | 223 +++++ .../tests/classic/element_click/shadow_dom.py | 53 ++ .../tests/classic/element_click/support/input.html | 3 + .../element_click/support/test_click_wdspec.html | 100 +++ .../tests/classic/element_click/user_prompts.py | 122 +++ .../tests/classic/element_send_keys/__init__.py | 2 + .../tests/classic/element_send_keys/conftest.py | 17 + .../classic/element_send_keys/content_editable.py | 30 + .../tests/classic/element_send_keys/events.py | 85 ++ .../tests/classic/element_send_keys/file_upload.py | 262 ++++++ .../classic/element_send_keys/form_controls.py | 102 +++ .../classic/element_send_keys/interactability.py | 142 ++++ .../classic/element_send_keys/scroll_into_view.py | 40 + .../tests/classic/element_send_keys/send_keys.py | 121 +++ .../classic/element_send_keys/user_prompts.py | 123 +++ .../tests/classic/execute_async_script/__init__.py | 16 + .../classic/execute_async_script/arguments.py | 176 ++++ .../classic/execute_async_script/collections.py | 161 ++++ .../tests/classic/execute_async_script/cyclic.py | 78 ++ .../classic/execute_async_script/execute_async.py | 80 ++ .../tests/classic/execute_async_script/node.py | 88 ++ .../tests/classic/execute_async_script/objects.py | 49 ++ .../tests/classic/execute_async_script/promise.py | 118 +++ .../classic/execute_async_script/properties.py | 64 ++ .../classic/execute_async_script/user_prompts.py | 109 +++ .../tests/classic/execute_script/__init__.py | 16 + .../tests/classic/execute_script/arguments.py | 164 ++++ .../tests/classic/execute_script/collections.py | 138 ++++ .../tests/classic/execute_script/cyclic.py | 78 ++ .../tests/classic/execute_script/execute.py | 115 +++ .../execute_script/json_serialize_windowproxy.py | 51 ++ .../webdriver/tests/classic/execute_script/node.py | 87 ++ .../tests/classic/execute_script/objects.py | 49 ++ .../tests/classic/execute_script/promise.py | 102 +++ .../tests/classic/execute_script/properties.py | 60 ++ .../tests/classic/execute_script/user_prompts.py | 107 +++ .../tests/classic/find_element/__init__.py | 0 .../webdriver/tests/classic/find_element/find.py | 121 +++ .../tests/classic/find_element/user_prompts.py | 120 +++ .../classic/find_element_from_element/__init__.py | 0 .../classic/find_element_from_element/find.py | 179 ++++ .../find_element_from_element/user_prompts.py | 125 
+++ .../find_element_from_shadow_root/__init__.py | 0 .../classic/find_element_from_shadow_root/find.py | 228 ++++++ .../find_element_from_shadow_root/user_prompts.py | 134 +++ .../tests/classic/find_elements/__init__.py | 0 .../webdriver/tests/classic/find_elements/find.py | 141 ++++ .../tests/classic/find_elements/user_prompts.py | 122 +++ .../classic/find_elements_from_element/__init__.py | 0 .../classic/find_elements_from_element/find.py | 199 +++++ .../find_elements_from_element/user_prompts.py | 127 +++ .../find_elements_from_shadow_root/__init__.py | 0 .../classic/find_elements_from_shadow_root/find.py | 237 ++++++ .../find_elements_from_shadow_root/user_prompts.py | 135 +++ .../webdriver/tests/classic/forward/__init__.py | 0 .../webdriver/tests/classic/forward/conftest.py | 19 + .../webdriver/tests/classic/forward/forward.py | 195 +++++ .../tests/classic/forward/user_prompts.py | 121 +++ .../tests/classic/fullscreen_window/__init__.py | 0 .../tests/classic/fullscreen_window/fullscreen.py | 53 ++ .../tests/classic/fullscreen_window/stress.py | 19 + .../classic/fullscreen_window/user_prompts.py | 116 +++ .../tests/classic/get_active_element/__init__.py | 0 .../tests/classic/get_active_element/get.py | 154 ++++ .../classic/get_active_element/user_prompts.py | 118 +++ .../tests/classic/get_alert_text/__init__.py | 0 .../webdriver/tests/classic/get_alert_text/get.py | 70 ++ .../tests/classic/get_computed_label/__init__.py | 0 .../tests/classic/get_computed_label/get.py | 89 ++ .../tests/classic/get_computed_role/__init__.py | 0 .../tests/classic/get_computed_role/get.py | 87 ++ .../tests/classic/get_current_url/__init__.py | 0 .../tests/classic/get_current_url/file.py | 23 + .../webdriver/tests/classic/get_current_url/get.py | 74 ++ .../tests/classic/get_current_url/iframe.py | 75 ++ .../tests/classic/get_current_url/user_prompts.py | 111 +++ .../classic/get_element_attribute/__init__.py | 0 .../tests/classic/get_element_attribute/get.py | 167 ++++ .../classic/get_element_attribute/user_prompts.py | 117 +++ .../classic/get_element_css_value/__init__.py | 0 .../tests/classic/get_element_css_value/get.py | 107 +++ .../classic/get_element_css_value/user_prompts.py | 120 +++ .../tests/classic/get_element_property/__init__.py | 0 .../tests/classic/get_element_property/get.py | 215 +++++ .../classic/get_element_property/user_prompts.py | 115 +++ .../tests/classic/get_element_rect/__init__.py | 1 + .../tests/classic/get_element_rect/get.py | 99 +++ .../tests/classic/get_element_rect/user_prompts.py | 120 +++ .../classic/get_element_shadow_root/__init__.py | 0 .../tests/classic/get_element_shadow_root/get.py | 102 +++ .../get_element_shadow_root/user_prompts.py | 117 +++ .../tests/classic/get_element_tag_name/__init__.py | 0 .../tests/classic/get_element_tag_name/get.py | 95 +++ .../classic/get_element_tag_name/user_prompts.py | 114 +++ .../tests/classic/get_element_text/__init__.py | 0 .../tests/classic/get_element_text/get.py | 109 +++ .../tests/classic/get_element_text/user_prompts.py | 116 +++ .../tests/classic/get_named_cookie/__init__.py | 0 .../tests/classic/get_named_cookie/get.py | 145 ++++ .../tests/classic/get_named_cookie/user_prompts.py | 118 +++ .../tests/classic/get_page_source/__init__.py | 0 .../tests/classic/get_page_source/source.py | 25 + .../tests/classic/get_page_source/user_prompts.py | 112 +++ .../tests/classic/get_timeouts/__init__.py | 0 .../webdriver/tests/classic/get_timeouts/get.py | 34 + .../webdriver/tests/classic/get_title/__init__.py | 0 
.../tests/webdriver/tests/classic/get_title/get.py | 56 ++ .../webdriver/tests/classic/get_title/iframe.py | 80 ++ .../tests/classic/get_title/user_prompts.py | 134 +++ .../tests/classic/get_window_handle/__init__.py | 0 .../tests/classic/get_window_handle/get.py | 43 + .../classic/get_window_handle/user_prompts.py | 61 ++ .../tests/classic/get_window_handles/__init__.py | 0 .../tests/classic/get_window_handles/get.py | 37 + .../classic/get_window_handles/user_prompts.py | 61 ++ .../tests/classic/get_window_rect/__init__.py | 0 .../webdriver/tests/classic/get_window_rect/get.py | 31 + .../tests/classic/get_window_rect/user_prompts.py | 113 +++ .../webdriver/tests/classic/idlharness.window.js | 16 + .../webdriver/tests/classic/interface/interface.py | 2 + .../tests/classic/is_element_enabled/__init__.py | 0 .../tests/classic/is_element_enabled/enabled.py | 171 ++++ .../classic/is_element_enabled/user_prompts.py | 119 +++ .../tests/classic/is_element_selected/__init__.py | 0 .../tests/classic/is_element_selected/selected.py | 138 ++++ .../classic/is_element_selected/user_prompts.py | 117 +++ .../tests/classic/maximize_window/__init__.py | 0 .../tests/classic/maximize_window/maximize.py | 100 +++ .../tests/classic/maximize_window/stress.py | 43 + .../tests/classic/maximize_window/user_prompts.py | 117 +++ .../tests/classic/minimize_window/__init__.py | 0 .../tests/classic/minimize_window/minimize.py | 69 ++ .../tests/classic/minimize_window/stress.py | 19 + .../tests/classic/minimize_window/user_prompts.py | 113 +++ .../tests/classic/navigate_to/__init__.py | 0 .../webdriver/tests/classic/navigate_to/file.py | 25 + .../tests/classic/navigate_to/navigate.py | 100 +++ .../tests/classic/navigate_to/user_prompts.py | 112 +++ .../tests/classic/new_session/__init__.py | 0 .../tests/classic/new_session/conftest.py | 79 ++ .../classic/new_session/create_alwaysMatch.py | 15 + .../tests/classic/new_session/create_firstMatch.py | 16 + .../tests/classic/new_session/default_values.py | 39 + .../classic/new_session/invalid_capabilities.py | 56 ++ .../webdriver/tests/classic/new_session/merge.py | 82 ++ .../tests/classic/new_session/no_capabilities.py | 8 + .../classic/new_session/page_load_strategy.py | 7 + .../tests/classic/new_session/platform_name.py | 11 + .../tests/classic/new_session/response.py | 44 + .../tests/classic/new_session/support/__init__.py | 0 .../tests/classic/new_session/support/create.py | 136 ++++ .../tests/classic/new_session/timeouts.py | 32 + .../tests/classic/new_session/websocket_url.py | 7 + .../webdriver/tests/classic/new_window/__init__.py | 10 + .../webdriver/tests/classic/new_window/new.py | 64 ++ .../webdriver/tests/classic/new_window/new_tab.py | 89 ++ .../tests/classic/new_window/new_window.py | 89 ++ .../tests/classic/new_window/user_prompts.py | 121 +++ .../tests/classic/perform_actions/__init__.py | 0 .../tests/classic/perform_actions/conftest.py | 89 ++ .../webdriver/tests/classic/perform_actions/key.py | 38 + .../tests/classic/perform_actions/key_events.py | 223 +++++ .../tests/classic/perform_actions/key_modifiers.py | 37 + .../tests/classic/perform_actions/key_shortcuts.py | 49 ++ .../classic/perform_actions/key_special_keys.py | 38 + .../tests/classic/perform_actions/none.py | 24 + .../classic/perform_actions/pointer_contextmenu.py | 78 ++ .../classic/perform_actions/pointer_dblclick.py | 33 + .../perform_actions/pointer_modifier_click.py | 91 +++ .../tests/classic/perform_actions/pointer_mouse.py | 206 +++++ .../classic/perform_actions/pointer_origin.py | 123 +++ 
.../perform_actions/pointer_pause_dblclick.py | 56 ++ .../tests/classic/perform_actions/pointer_pen.py | 75 ++ .../tests/classic/perform_actions/pointer_touch.py | 93 +++ .../classic/perform_actions/pointer_tripleclick.py | 36 + .../tests/classic/perform_actions/sequence.py | 9 + .../classic/perform_actions/support/__init__.py | 0 .../tests/classic/perform_actions/support/mouse.py | 26 + .../classic/perform_actions/support/refine.py | 29 + .../tests/classic/perform_actions/user_prompts.py | 124 +++ .../tests/classic/perform_actions/validity.py | 80 ++ .../tests/classic/perform_actions/wheel.py | 75 ++ .../tests/classic/permissions/__init__.py | 0 .../webdriver/tests/classic/permissions/set.py | 83 ++ .../webdriver/tests/classic/print/__init__.py | 21 + .../webdriver/tests/classic/print/background.py | 59 ++ .../webdriver/tests/classic/print/orientation.py | 44 + .../webdriver/tests/classic/print/printcmd.py | 130 +++ .../webdriver/tests/classic/print/user_prompts.py | 109 +++ .../webdriver/tests/classic/refresh/__init__.py | 0 .../webdriver/tests/classic/refresh/refresh.py | 123 +++ .../tests/classic/refresh/user_prompts.py | 117 +++ .../tests/classic/release_actions/__init__.py | 0 .../tests/classic/release_actions/conftest.py | 40 + .../tests/classic/release_actions/release.py | 23 + .../tests/classic/release_actions/sequence.py | 79 ++ .../classic/release_actions/support/__init__.py | 0 .../classic/release_actions/support/refine.py | 24 + .../tests/classic/send_alert_text/__init__.py | 0 .../tests/classic/send_alert_text/conftest.py | 24 + .../tests/classic/send_alert_text/send.py | 94 +++ .../tests/classic/set_timeouts/__init__.py | 0 .../webdriver/tests/classic/set_timeouts/set.py | 95 +++ .../tests/classic/set_timeouts/user_prompts.py | 62 ++ .../tests/classic/set_window_rect/__init__.py | 0 .../webdriver/tests/classic/set_window_rect/set.py | 403 +++++++++ .../tests/classic/set_window_rect/user_prompts.py | 121 +++ .../webdriver/tests/classic/status/__init__.py | 0 .../tests/webdriver/tests/classic/status/status.py | 33 + .../tests/classic/switch_to_frame/__init__.py | 0 .../tests/classic/switch_to_frame/cross_origin.py | 63 ++ .../tests/classic/switch_to_frame/switch.py | 125 +++ .../tests/classic/switch_to_frame/switch_number.py | 50 ++ .../classic/switch_to_frame/switch_webelement.py | 100 +++ .../classic/switch_to_parent_frame/__init__.py | 0 .../tests/classic/switch_to_parent_frame/switch.py | 85 ++ .../tests/classic/switch_to_window/__init__.py | 0 .../tests/classic/switch_to_window/alerts.py | 33 + .../tests/classic/switch_to_window/switch.py | 100 +++ .../classic/take_element_screenshot/__init__.py | 10 + .../classic/take_element_screenshot/iframe.py | 121 +++ .../classic/take_element_screenshot/screenshot.py | 100 +++ .../take_element_screenshot/user_prompts.py | 121 +++ .../tests/classic/take_screenshot/__init__.py | 21 + .../tests/classic/take_screenshot/iframe.py | 54 ++ .../tests/classic/take_screenshot/screenshot.py | 34 + .../tests/classic/take_screenshot/user_prompts.py | 113 +++ .../web-platform/tests/webdriver/tests/conftest.py | 5 + .../tests/webdriver/tests/support/__init__.py | 14 + .../tests/webdriver/tests/support/asserts.py | 224 +++++ .../tests/webdriver/tests/support/defaults.py | 6 + .../tests/webdriver/tests/support/fixtures.py | 442 ++++++++++ .../tests/webdriver/tests/support/fixtures_bidi.py | 363 +++++++++ .../tests/webdriver/tests/support/fixtures_http.py | 240 ++++++ .../tests/webdriver/tests/support/helpers.py | 271 ++++++ 
.../webdriver/tests/support/html/deleteframe.html | 6 + .../tests/webdriver/tests/support/html/frames.html | 16 + .../tests/support/html/frames_no_bfcache.html | 18 + .../support/html/meta-utf8-after-1024-bytes.html | 17 + .../tests/webdriver/tests/support/html/render.html | 68 ++ .../webdriver/tests/support/html/subframe.html | 16 + .../webdriver/tests/support/html/test_actions.html | 216 +++++ .../tests/support/html/test_actions_pointer.html | 102 +++ .../tests/support/html/test_actions_scroll.html | 103 +++ .../tests/support/http_handlers/__init__.py | 0 .../tests/support/http_handlers/authentication.py | 25 + .../tests/support/http_handlers/cached.py | 14 + .../tests/support/http_handlers/headers.py | 19 + .../tests/support/http_handlers/must-revalidate.py | 17 + .../tests/support/http_handlers/redirect.py | 19 + .../tests/support/http_handlers/status.py | 16 + .../tests/webdriver/tests/support/http_request.py | 40 + .../tests/webdriver/tests/support/image.py | 37 + .../tests/webdriver/tests/support/inline.py | 61 ++ .../tests/webdriver/tests/support/keys.py | 905 +++++++++++++++++++++ .../webdriver/tests/support/merge_dictionaries.py | 42 + .../tests/webdriver/tests/support/pdf.py | 8 + .../tests/webdriver/tests/support/screenshot.py | 50 ++ .../tests/webdriver/tests/support/sync.py | 276 +++++++ 467 files changed, 38684 insertions(+) create mode 100644 testing/web-platform/tests/webdriver/META.yml create mode 100644 testing/web-platform/tests/webdriver/README.md create mode 100644 testing/web-platform/tests/webdriver/tests/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/capture_screenshot.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/frame.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/window_handle.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/close.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/context_created.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/reference_context.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/type.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/__init__.py create mode 100644 
testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/dom_content_loaded.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/frames.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/max_depth.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/root.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/load.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/about_blank.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/data_url.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/error.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/frame.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/hash.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/image.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/navigate.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.png create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.html create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.js create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.html create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.svg create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.png create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/wait.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/background.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/context.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/margin.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/orientation.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/page.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/page_ranges.py create mode 100644 
testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/scale.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/shrink_to_fit.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/browsing_context/reload/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/errors/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/errors/errors.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key_events.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key_modifier.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse_modifier.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse_multiclick.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_origin.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_pen.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_touch.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/wheel.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/context.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/release.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/sequence.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/log/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console_args.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/event_buffer.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/javascript.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/realm.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/stacktrace.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/subscription.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/__init__.py create mode 100644 
testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/before_request_sent.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/combined/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/combined/network_events.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed_cached.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/response_started/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started_cached.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.html create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.js create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.png create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.svg create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.txt create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/support/redirect_http_equiv.html create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/network/support/redirected.html create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/add_preload_script.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/arguments.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/sandbox.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/arguments.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/await_promise.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/exception_details.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/function_declaration.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/internal_id.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid_tentative.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/realm.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_node.py create mode 100644 
testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_ownership.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/sandbox.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/serialization_options.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/strict_mode.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/call_function/this.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/classic_interop/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/classic_interop/node_shared_id.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/disown/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/disown/handles.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid_tentative.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/disown/target.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/await_promise.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/evaluate.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/exception_details.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/internal_id.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid_tentative.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_node.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_ownership.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/sandbox.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/serialization_options.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/strict_mode.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/context.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/get_realms.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/sandbox.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/type.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/message/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/message/message.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/invalid.py create mode 100644 
testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/remove_preload_script.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/sandbox.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/new/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/new/connect.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/status/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/status/status.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/contexts.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/events.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/contexts.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/events.py create mode 100644 testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/invalid.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/accept_alert/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/accept_alert/accept.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/add_cookie/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/add_cookie/add.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/add_cookie/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/back/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/back/back.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/back/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/back/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/close_window/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/close_window/close.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/close_window/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/delete.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/delete_cookie/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/delete_cookie/delete.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/delete_cookie/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/delete_session/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/delete_session/delete.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/dismiss_alert/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/dismiss_alert/dismiss.py create mode 100644 
testing/web-platform/tests/webdriver/tests/classic/element_clear/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_clear/clear.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_clear/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/bubbling.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/center_point.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/click.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/events.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/file_upload.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/interactability.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/navigate.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/scroll_into_view.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/select.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/shadow_dom.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/support/input.html create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/support/test_click_wdspec.html create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_click/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/content_editable.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/events.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/file_upload.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/form_controls.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/interactability.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/scroll_into_view.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/send_keys.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/element_send_keys/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_async_script/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_async_script/arguments.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_async_script/collections.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_async_script/cyclic.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_async_script/execute_async.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_async_script/node.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_async_script/objects.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_async_script/promise.py create mode 100644 
testing/web-platform/tests/webdriver/tests/classic/execute_async_script/properties.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_async_script/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/arguments.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/collections.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/cyclic.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/execute.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/json_serialize_windowproxy.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/node.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/objects.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/promise.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/properties.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/execute_script/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_element/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_element/find.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_element/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_element_from_element/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_element_from_element/find.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_element_from_element/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_element_from_shadow_root/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_element_from_shadow_root/find.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_element_from_shadow_root/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_elements/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_elements/find.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_elements/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_elements_from_element/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_elements_from_element/find.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_elements_from_element/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_elements_from_shadow_root/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_elements_from_shadow_root/find.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/find_elements_from_shadow_root/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/forward/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/forward/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/forward/forward.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/forward/user_prompts.py create mode 
100644 testing/web-platform/tests/webdriver/tests/classic/fullscreen_window/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/fullscreen_window/fullscreen.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/fullscreen_window/stress.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/fullscreen_window/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_active_element/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_active_element/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_active_element/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_alert_text/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_alert_text/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_computed_label/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_computed_label/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_computed_role/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_computed_role/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_current_url/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_current_url/file.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_current_url/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_current_url/iframe.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_current_url/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_attribute/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_attribute/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_attribute/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_css_value/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_css_value/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_css_value/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_property/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_property/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_property/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_rect/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_rect/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_rect/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_shadow_root/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_shadow_root/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_shadow_root/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_tag_name/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_tag_name/get.py create mode 100644 
testing/web-platform/tests/webdriver/tests/classic/get_element_tag_name/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_text/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_text/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_element_text/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_named_cookie/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_named_cookie/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_named_cookie/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_page_source/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_page_source/source.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_page_source/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_timeouts/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_timeouts/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_title/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_title/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_title/iframe.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_title/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_window_handle/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_window_handle/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_window_handle/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_window_handles/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_window_handles/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_window_handles/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_window_rect/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_window_rect/get.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/get_window_rect/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/idlharness.window.js create mode 100644 testing/web-platform/tests/webdriver/tests/classic/interface/interface.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/is_element_enabled/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/is_element_enabled/enabled.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/is_element_enabled/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/is_element_selected/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/is_element_selected/selected.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/is_element_selected/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/maximize_window/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/maximize_window/maximize.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/maximize_window/stress.py create mode 100644 
testing/web-platform/tests/webdriver/tests/classic/maximize_window/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/minimize_window/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/minimize_window/minimize.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/minimize_window/stress.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/minimize_window/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/navigate_to/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/navigate_to/file.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/navigate_to/navigate.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/navigate_to/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/create_alwaysMatch.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/create_firstMatch.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/default_values.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/invalid_capabilities.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/merge.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/no_capabilities.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/page_load_strategy.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/platform_name.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/response.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/support/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/support/create.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/timeouts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_session/websocket_url.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_window/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_window/new.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_window/new_tab.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_window/new_window.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/new_window/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/key.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/key_events.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/key_modifiers.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/key_shortcuts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/key_special_keys.py create mode 100644 
testing/web-platform/tests/webdriver/tests/classic/perform_actions/none.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/pointer_contextmenu.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/pointer_dblclick.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/pointer_modifier_click.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/pointer_mouse.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/pointer_origin.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/pointer_pause_dblclick.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/pointer_pen.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/pointer_touch.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/pointer_tripleclick.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/sequence.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/support/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/support/mouse.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/support/refine.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/validity.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/perform_actions/wheel.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/permissions/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/permissions/set.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/print/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/print/background.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/print/orientation.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/print/printcmd.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/print/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/refresh/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/refresh/refresh.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/refresh/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/release_actions/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/release_actions/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/release_actions/release.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/release_actions/sequence.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/release_actions/support/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/release_actions/support/refine.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/send_alert_text/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/send_alert_text/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/send_alert_text/send.py create mode 100644 
testing/web-platform/tests/webdriver/tests/classic/set_timeouts/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/set_timeouts/set.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/set_timeouts/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/set_window_rect/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/set_window_rect/set.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/set_window_rect/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/status/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/status/status.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_frame/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_frame/cross_origin.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_frame/switch.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_frame/switch_number.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_frame/switch_webelement.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_parent_frame/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_parent_frame/switch.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_window/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_window/alerts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/switch_to_window/switch.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/take_element_screenshot/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/take_element_screenshot/iframe.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/take_element_screenshot/screenshot.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/take_element_screenshot/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/take_screenshot/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/take_screenshot/iframe.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/take_screenshot/screenshot.py create mode 100644 testing/web-platform/tests/webdriver/tests/classic/take_screenshot/user_prompts.py create mode 100644 testing/web-platform/tests/webdriver/tests/conftest.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/asserts.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/defaults.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/fixtures.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/fixtures_bidi.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/fixtures_http.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/helpers.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/html/deleteframe.html create mode 100644 testing/web-platform/tests/webdriver/tests/support/html/frames.html create mode 100644 testing/web-platform/tests/webdriver/tests/support/html/frames_no_bfcache.html create mode 100644 
testing/web-platform/tests/webdriver/tests/support/html/meta-utf8-after-1024-bytes.html create mode 100644 testing/web-platform/tests/webdriver/tests/support/html/render.html create mode 100644 testing/web-platform/tests/webdriver/tests/support/html/subframe.html create mode 100644 testing/web-platform/tests/webdriver/tests/support/html/test_actions.html create mode 100644 testing/web-platform/tests/webdriver/tests/support/html/test_actions_pointer.html create mode 100644 testing/web-platform/tests/webdriver/tests/support/html/test_actions_scroll.html create mode 100644 testing/web-platform/tests/webdriver/tests/support/http_handlers/__init__.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/http_handlers/authentication.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/http_handlers/cached.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/http_handlers/headers.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/http_handlers/must-revalidate.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/http_handlers/redirect.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/http_handlers/status.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/http_request.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/image.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/inline.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/keys.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/merge_dictionaries.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/pdf.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/screenshot.py create mode 100644 testing/web-platform/tests/webdriver/tests/support/sync.py (limited to 'testing/web-platform/tests/webdriver') diff --git a/testing/web-platform/tests/webdriver/META.yml b/testing/web-platform/tests/webdriver/META.yml new file mode 100644 index 0000000000..8475a173ab --- /dev/null +++ b/testing/web-platform/tests/webdriver/META.yml @@ -0,0 +1,9 @@ +spec: https://w3c.github.io/webdriver/ +suggested_reviewers: + - AutomatedTester + - bwalderman + - jgraham + - juliandescottes + - sadym-chromium + - shs96c + - whimboo diff --git a/testing/web-platform/tests/webdriver/README.md b/testing/web-platform/tests/webdriver/README.md new file mode 100644 index 0000000000..67bb294d6e --- /dev/null +++ b/testing/web-platform/tests/webdriver/README.md @@ -0,0 +1,17 @@ +# WebDriver specification tests + +Herein lies a set of conformance tests +for the W3C web browser automation specification +known as [WebDriver](http://w3c.github.io/webdriver/). +The purpose of these tests is determine implementation compliance +so that different driver implementations can determine +whether they meet the recognized standard. + +## Chapters of the Spec that still need tests + +We are using a [tracking spreadsheet](https://docs.google.com/spreadsheets/d/1GUK_sdY2cv59VAJNDxZQIfypnOpapSQhMjfcJ9Wc42U/edit#gid=0) +to coordinate work on these tests. Please look there to see who +is working on what, and which areas are currently under-tested. + +The spec contributors and editors can frequently be found on the W3C +#webdriver IRC channel. 
diff --git a/testing/web-platform/tests/webdriver/tests/__init__.py b/testing/web-platform/tests/webdriver/tests/__init__.py new file mode 100644 index 0000000000..0ba172ff2e --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/__init__.py @@ -0,0 +1,4 @@ +import pytest + +# Enable pytest assert introspection for assertion helper +pytest.register_assert_rewrite('tests.support.asserts') diff --git a/testing/web-platform/tests/webdriver/tests/bidi/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/__init__.py new file mode 100644 index 0000000000..625cd3a630 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/__init__.py @@ -0,0 +1,80 @@ +from typing import Any, Callable + +from webdriver.bidi.modules.script import ContextTarget + + +# Compares 2 objects recursively. +# Actual value can have more keys as part of the forwards-compat design. +# Expected value can be a callable delegate, asserting the value. +def recursive_compare(expected: Any, actual: Any) -> None: + if callable(expected): + expected(actual) + return + + assert type(expected) == type(actual) + if type(expected) is list: + assert len(expected) == len(actual) + for index, _ in enumerate(expected): + recursive_compare(expected[index], actual[index]) + return + + if type(expected) is dict: + # Actual dict can have more keys as part of the forwards-compat design. + assert expected.keys() <= actual.keys(), \ + f"Key set should be present: {set(expected.keys()) - set(actual.keys())}" + for key in expected.keys(): + recursive_compare(expected[key], actual[key]) + return + + assert expected == actual + + +def any_bool(actual: Any) -> None: + assert isinstance(actual, bool) + + +def any_dict(actual: Any) -> None: + assert isinstance(actual, dict) + + +def any_int(actual: Any) -> None: + assert isinstance(actual, int) + + +def any_int_or_null(actual: Any) -> None: + if actual is not None: + any_int(actual) + + +def any_list(actual: Any) -> None: + assert isinstance(actual, list) + + +def any_string(actual: Any) -> None: + assert isinstance(actual, str) + + +def any_string_or_null(actual: Any) -> None: + if actual is not None: + any_string(actual) + + +def int_interval(start: int, end: int) -> Callable[[Any], None]: + def _(actual: Any) -> None: + any_int(actual) + assert start <= actual <= end + + return _ + +def positive_int(actual: Any) -> None: + assert isinstance(actual, int) and actual > 0 + + +async def create_console_api_message(bidi_session, context, text): + await bidi_session.script.call_function( + function_declaration="""(text) => console.log(text)""", + arguments=[{"type": "string", "value": text}], + await_promise=False, + target=ContextTarget(context["context"]), + ) + return text diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/__init__.py new file mode 100644 index 0000000000..a887aeb8a4 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/__init__.py @@ -0,0 +1,65 @@ +from .. 
import ( + any_int, + any_string, + any_string_or_null, + recursive_compare, +) + +def assert_browsing_context( + info, context, children=None, is_root=True, parent=None, url=None +): + assert "children" in info + if children is not None: + assert isinstance(info["children"], list) + assert len(info["children"]) == children + else: + assert info["children"] is None + + assert "context" in info + assert isinstance(info["context"], str) + # Note: Only the tests for browsingContext.getTree should be allowed to + # pass None here because it's not possible to assert the exact browsing + # context id for frames. + if context is not None: + assert info["context"] == context + + if is_root: + if parent is None: + # For a top-level browsing context there is no parent + assert info["parent"] is None + else: + assert "parent" in info + assert isinstance(info["parent"], str) + assert info["parent"] == parent + else: + # non root browsing context entries do not contain a parent + assert "parent" not in info + assert parent is None + + assert "url" in info + assert isinstance(info["url"], str) + assert info["url"] == url + + +def assert_navigation_info(event, expected_navigation_info): + recursive_compare( + { + "context": any_string, + "navigation": any_string_or_null, + "timestamp": any_int, + "url": any_string, + }, + event, + ) + + if "context" in expected_navigation_info: + assert event["context"] == expected_navigation_info["context"] + + if "navigation" in expected_navigation_info: + assert event["navigation"] == expected_navigation_info["navigation"] + + if "timestamp" in expected_navigation_info: + expected_navigation_info["timestamp"](event["timestamp"]) + + if "url" in expected_navigation_info: + assert event["url"] == expected_navigation_info["url"] diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/__init__.py new file mode 100644 index 0000000000..066c34f3f2 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/__init__.py @@ -0,0 +1,21 @@ +from webdriver.bidi.modules.script import ContextTarget + +async def viewport_dimensions(bidi_session, context): + """Get the dimensions of the context's viewport. + + :param bidi_session: BiDiSession + :param context: Browsing context ID + :returns: Tuple of (int, int) containing viewport width, viewport height. + """ + result = await bidi_session.script.call_function( + function_declaration="""() => { + const {devicePixelRatio, innerHeight, innerWidth} = window; + + return [ + Math.floor(innerWidth * devicePixelRatio), + Math.floor(innerHeight * devicePixelRatio) + ]; + }""", + target=ContextTarget(context["context"]), + await_promise=False) + return tuple(item["value"] for item in result["value"]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/capture_screenshot.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/capture_screenshot.py new file mode 100644 index 0000000000..4c49fa2f85 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/capture_screenshot.py @@ -0,0 +1,36 @@ +import pytest +from tests.support.image import png_dimensions + +from . 
import viewport_dimensions + + +@pytest.mark.asyncio +async def test_capture(bidi_session, url, top_context, inline, compare_png_bidi): + expected_size = await viewport_dimensions(bidi_session, top_context) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url="about:blank", wait="complete" + ) + reference_data = await bidi_session.browsing_context.capture_screenshot( + context=top_context["context"]) + assert png_dimensions(reference_data) == expected_size + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=inline("
<div>foo</div>
"), wait="complete" + ) + data = await bidi_session.browsing_context.capture_screenshot( + context=top_context["context"]) + + comparison = await compare_png_bidi(data, reference_data) + assert not comparison.equal() + + # Take a second screenshot that should be identical to validate that + # we don't just always return false here + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=inline("
<div>foo</div>
"), wait="complete" + ) + new_data = await bidi_session.browsing_context.capture_screenshot( + context=top_context["context"]) + + comparison = await compare_png_bidi(new_data, data) + assert comparison.equal() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/frame.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/frame.py new file mode 100644 index 0000000000..a3013f2e07 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/frame.py @@ -0,0 +1,58 @@ +import pytest +from tests.support.image import png_dimensions +from tests.support.screenshot import ( + DEFAULT_CONTENT, + INNER_IFRAME_STYLE, + OUTER_IFRAME_STYLE, + REFERENCE_CONTENT, + REFERENCE_STYLE, +) + +from . import viewport_dimensions + + +@pytest.mark.asyncio +async def test_iframe(bidi_session, top_context, inline, iframe): + viewport_size = await viewport_dimensions(bidi_session, top_context) + + iframe_content = f"{INNER_IFRAME_STYLE}{DEFAULT_CONTENT}" + url = inline(f"{OUTER_IFRAME_STYLE}{iframe(iframe_content)}") + await bidi_session.browsing_context.navigate(context=top_context["context"], + url=url, + wait="complete") + reference_data = await bidi_session.browsing_context.capture_screenshot( + context=top_context["context"]) + assert png_dimensions(reference_data) == viewport_size + + all_contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + frame_context = all_contexts[0]["children"][0] + + data = await bidi_session.browsing_context.capture_screenshot(context=frame_context["context"]) + + assert png_dimensions(data) < png_dimensions(reference_data) + + +@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"]) +@pytest.mark.asyncio +async def test_context_origin(bidi_session, top_context, inline, iframe, compare_png_bidi, domain): + expected_size = await viewport_dimensions(bidi_session, top_context) + + initial_url = inline(f"{REFERENCE_STYLE}{REFERENCE_CONTENT}") + await bidi_session.browsing_context.navigate(context=top_context["context"], + url=initial_url, + wait="complete") + + reference_data = await bidi_session.browsing_context.capture_screenshot( + context=top_context["context"]) + assert png_dimensions(reference_data) == expected_size + + iframe_content = f"{INNER_IFRAME_STYLE}{DEFAULT_CONTENT}" + new_url = inline(f"{OUTER_IFRAME_STYLE}{iframe(iframe_content, domain=domain)}") + await bidi_session.browsing_context.navigate(context=top_context["context"], + url=new_url, + wait="complete") + + data = await bidi_session.browsing_context.capture_screenshot(context=top_context["context"]) + comparison = await compare_png_bidi(data, reference_data) + + assert comparison.equal() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/invalid.py new file mode 100644 index 0000000000..e30a0d3c99 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/invalid.py @@ -0,0 +1,26 @@ +import pytest +import webdriver.bidi.error as error + + +@pytest.mark.parametrize("value", [None, False, 42, {}, []]) +@pytest.mark.asyncio +async def test_params_context_invalid_type(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.capture_screenshot(context=value) + + +@pytest.mark.asyncio +async def 
test_invalid_frame(bidi_session, top_context, inline): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.browsing_context.capture_screenshot(context="_invalid_") + + +@pytest.mark.asyncio +async def test_closed_frame(bidi_session, top_context, inline, add_and_remove_iframe): + url = inline("
<div>foo</div>
") + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + frame_id = await add_and_remove_iframe(top_context) + with pytest.raises(error.NoSuchFrameException): + await bidi_session.browsing_context.capture_screenshot(context=frame_id) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/window_handle.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/window_handle.py new file mode 100644 index 0000000000..4f36fba197 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/window_handle.py @@ -0,0 +1,7 @@ +import pytest + +pytestmark = pytest.mark.asyncio + + +async def test_top_level_context_id_equals_window_handle(top_context, current_session): + assert top_context["context"] == current_session.window_handle diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/close.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/close.py new file mode 100644 index 0000000000..21bf7411e5 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/close.py @@ -0,0 +1,23 @@ +import pytest + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("type_hint", ["window", "tab"]) +async def test_top_level_context(bidi_session, type_hint): + top_level_context = await bidi_session.browsing_context.create( + type_hint=type_hint + ) + + contexts = await bidi_session.browsing_context.get_tree() + assert len(contexts) == 2 + + await bidi_session.browsing_context.close(context=top_level_context["context"]) + + contexts = await bidi_session.browsing_context.get_tree() + assert len(contexts) == 1 + + assert contexts[0]["context"] != top_level_context["context"] + + # TODO: Add a test for closing the last tab once the behavior has been specified + # https://github.com/w3c/webdriver-bidi/issues/187 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/invalid.py new file mode 100644 index 0000000000..7c73a83b13 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/invalid.py @@ -0,0 +1,31 @@ +import pytest +import webdriver.bidi.error as error + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", [None, False, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.close(context=value) + + +async def test_params_context_invalid_value(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.browsing_context.close(context="foo") + + +async def test_child_context(bidi_session, test_page_same_origin_frame, top_context): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_same_origin_frame, wait="complete" + ) + + all_contexts = 
await bidi_session.browsing_context.get_tree() + + assert len(all_contexts) == 1 + parent_info = all_contexts[0] + assert len(parent_info["children"]) == 1 + child_info = parent_info["children"][0] + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.close(context=child_info["context"]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/context_created.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/context_created.py new file mode 100644 index 0000000000..93be00bbb6 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/context_created.py @@ -0,0 +1,225 @@ +import asyncio + +import pytest +from tests.support.sync import AsyncPoll +from webdriver.bidi.modules.script import ContextTarget +from webdriver.error import TimeoutException + +from .. import assert_browsing_context + +pytestmark = pytest.mark.asyncio + +CONTEXT_CREATED_EVENT = "browsingContext.contextCreated" + + +async def test_not_unsubscribed(bidi_session): + await bidi_session.session.subscribe(events=[CONTEXT_CREATED_EVENT]) + await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT]) + + # Track all received browsingContext.contextCreated events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event) + + await bidi_session.browsing_context.create(type_hint="tab") + + wait = AsyncPoll(bidi_session, timeout=0.5) + with pytest.raises(TimeoutException): + await wait.until(lambda _: len(events) > 0) + + remove_listener() + + +@pytest.mark.parametrize("type_hint", ["tab", "window"]) +async def test_new_context(bidi_session, wait_for_event, subscribe_events, type_hint): + await subscribe_events([CONTEXT_CREATED_EVENT]) + + on_entry = wait_for_event(CONTEXT_CREATED_EVENT) + top_level_context = await bidi_session.browsing_context.create(type_hint="tab") + context_info = await on_entry + + assert_browsing_context( + context_info, + top_level_context["context"], + children=None, + url="about:blank", + parent=None, + ) + + +async def test_evaluate_window_open_without_url(bidi_session, subscribe_events, wait_for_event, top_context): + await subscribe_events([CONTEXT_CREATED_EVENT]) + + on_entry = wait_for_event(CONTEXT_CREATED_EVENT) + + await bidi_session.script.evaluate( + expression="""window.open();""", + target=ContextTarget(top_context["context"]), + await_promise=False) + + context_info = await on_entry + + assert_browsing_context( + context_info, + context=None, + children=None, + url="about:blank", + parent=None, + ) + + +async def test_evaluate_window_open_with_url(bidi_session, subscribe_events, wait_for_event, inline, top_context): + url = inline("
<div>foo</div>
") + + await subscribe_events([CONTEXT_CREATED_EVENT]) + + on_entry = wait_for_event(CONTEXT_CREATED_EVENT) + + await bidi_session.script.evaluate( + expression=f"""window.open("{url}");""", + target=ContextTarget(top_context["context"]), + await_promise=False) + context_info = await on_entry + + assert_browsing_context( + context_info, + context=None, + children=None, + url="about:blank", + parent=None, + ) + + +async def test_navigate_creates_iframes(bidi_session, subscribe_events, top_context, test_page_multiple_frames): + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event) + await subscribe_events([CONTEXT_CREATED_EVENT]) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_multiple_frames, wait="complete" + ) + + wait = AsyncPoll( + bidi_session, message="Didn't receive context created events for frames" + ) + await wait.until(lambda _: len(events) >= 2) + assert len(events) == 2 + + # Get all browsing contexts from the first tab + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + + assert len(contexts) == 1 + root_info = contexts[0] + children_info = root_info["children"] + assert len(children_info) == 2 + + # Note: Live `browsingContext.contextCreated` events are always created with "about:blank": + # https://github.com/w3c/webdriver-bidi/issues/220#issuecomment-1145785349 + assert_browsing_context( + events[0], + children_info[0]["context"], + children=None, + url="about:blank", + parent=root_info["context"], + ) + + assert_browsing_context( + events[1], + children_info[1]["context"], + children=None, + url="about:blank", + parent=root_info["context"], + ) + + remove_listener() + + +async def test_navigate_creates_nested_iframes(bidi_session, subscribe_events, top_context, test_page_nested_frames): + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event) + await subscribe_events([CONTEXT_CREATED_EVENT]) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_nested_frames, wait="complete" + ) + + wait = AsyncPoll( + bidi_session, message="Didn't receive context created events for frames" + ) + await wait.until(lambda _: len(events) >= 2) + assert len(events) == 2 + + # Get all browsing contexts from the first tab + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + + assert len(contexts) == 1 + root_info = contexts[0] + assert len(root_info["children"]) == 1 + child1_info = root_info["children"][0] + assert len(child1_info["children"]) == 1 + child2_info = child1_info["children"][0] + + # Note: `browsingContext.contextCreated` is always created with "about:blank": + # https://github.com/w3c/webdriver-bidi/issues/220#issuecomment-1145785349 + assert_browsing_context( + events[0], + child1_info["context"], + children=None, + url="about:blank", + parent=root_info["context"], + ) + + assert_browsing_context( + events[1], + child2_info["context"], + children=None, + url="about:blank", + parent=child1_info["context"], + ) + + remove_listener() + + +async def test_subscribe_to_one_context( + bidi_session, subscribe_events, top_context, test_page_same_origin_frame +): + # Subscribe to a specific context + await subscribe_events( + events=[CONTEXT_CREATED_EVENT], contexts=[top_context["context"]] + ) + + # Track all received 
browsingContext.contextCreated events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event) + + await bidi_session.browsing_context.create(type_hint="tab") + + # Make sure we didn't receive the event for the new tab + wait = AsyncPoll(bidi_session, timeout=0.5) + with pytest.raises(TimeoutException): + await wait.until(lambda _: len(events) > 0) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_same_origin_frame, wait="complete" + ) + + # Make sure we received the event for the iframe + await wait.until(lambda _: len(events) >= 1) + assert len(events) == 1 + + remove_listener() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/invalid.py new file mode 100644 index 0000000000..2d60e08476 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/invalid.py @@ -0,0 +1,53 @@ +import pytest +import webdriver.bidi.error as error + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", [False, 42, {}, []]) +async def test_params_reference_context_invalid_type(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.create( + type_hint="tab", reference_context=value + ) + + +async def test_params_reference_context_invalid_value(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.browsing_context.create( + type_hint="tab", reference_context="foo" + ) + + +async def test_params_reference_context_non_top_level( + bidi_session, test_page_same_origin_frame, top_context +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=test_page_same_origin_frame, + wait="complete", + ) + + all_contexts = await bidi_session.browsing_context.get_tree() + + assert len(all_contexts) == 1 + parent_info = all_contexts[0] + assert len(parent_info["children"]) == 1 + child_info = parent_info["children"][0] + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.create( + type_hint="tab", reference_context=child_info["context"] + ) + + +@pytest.mark.parametrize("value", [None, False, 42, {}, []]) +async def test_params_type_invalid_type(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.create(type_hint=value) + + +@pytest.mark.parametrize("value", ["", "foo"]) +async def test_params_type_invalid_value(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.create(type_hint=value) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/reference_context.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/reference_context.py new file mode 100644 index 0000000000..f8a834069a --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/reference_context.py @@ -0,0 +1,46 @@ +import pytest + +from .. 
import assert_browsing_context +from webdriver.bidi.modules.script import ContextTarget + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", ["tab", "window"]) +async def test_reference_context(bidi_session, value): + contexts = await bidi_session.browsing_context.get_tree(max_depth=0) + assert len(contexts) == 1 + + reference_context = await bidi_session.browsing_context.create(type_hint="tab") + contexts = await bidi_session.browsing_context.get_tree(max_depth=0) + assert len(contexts) == 2 + + new_context = await bidi_session.browsing_context.create( + reference_context=reference_context["context"], type_hint=value + ) + assert contexts[0]["context"] != new_context["context"] + assert contexts[0]["context"] != new_context["context"] + + contexts = await bidi_session.browsing_context.get_tree(max_depth=0) + assert len(contexts) == 3 + + # Retrieve the new context info + contexts = await bidi_session.browsing_context.get_tree( + max_depth=0, root=new_context["context"] + ) + + assert_browsing_context( + contexts[0], + new_context["context"], + children=None, + is_root=True, + parent=None, + url="about:blank", + ) + + # We can not assert the specific behavior of reference_context here, + # so we only verify that a new browsing context was successfully created + # when a valid reference_context is provided. + + await bidi_session.browsing_context.close(context=reference_context["context"]) + await bidi_session.browsing_context.close(context=new_context["context"]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/type.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/type.py new file mode 100644 index 0000000000..55ce7b4428 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/type.py @@ -0,0 +1,41 @@ +import pytest + +from .. 
import assert_browsing_context +from webdriver.bidi.modules.script import ContextTarget + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", ["tab", "window"]) +async def test_type(bidi_session, value): + contexts = await bidi_session.browsing_context.get_tree(max_depth=0) + assert len(contexts) == 1 + + new_context = await bidi_session.browsing_context.create(type_hint=value) + assert contexts[0]["context"] != new_context["context"] + + # Check there is an additional browsing context + contexts = await bidi_session.browsing_context.get_tree(max_depth=0) + assert len(contexts) == 2 + + # Retrieve the new context info + contexts = await bidi_session.browsing_context.get_tree( + max_depth=0, root=new_context["context"] + ) + + assert_browsing_context( + contexts[0], + new_context["context"], + children=None, + is_root=True, + parent=None, + url="about:blank", + ) + + opener_protocol_value = await bidi_session.script.evaluate( + expression="!!window.opener", + target=ContextTarget(new_context["context"]), + await_promise=False) + assert opener_protocol_value["value"] is False + + await bidi_session.browsing_context.close(context=new_context["context"]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/dom_content_loaded.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/dom_content_loaded.py new file mode 100644 index 0000000000..9723ee4a4e --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/dom_content_loaded.py @@ -0,0 +1,148 @@ +import pytest +from tests.support.sync import AsyncPoll +from webdriver.bidi.modules.script import ContextTarget + +from ... import int_interval +from .. import assert_navigation_info + +pytestmark = pytest.mark.asyncio + +DOM_CONTENT_LOADED_EVENT = "browsingContext.domContentLoaded" + + +async def test_unsubscribe(bidi_session, inline, top_context): + await bidi_session.session.subscribe(events=[DOM_CONTENT_LOADED_EVENT]) + await bidi_session.session.unsubscribe(events=[DOM_CONTENT_LOADED_EVENT]) + + # Track all received browsingContext.domContentLoaded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener( + DOM_CONTENT_LOADED_EVENT, on_event + ) + + url = inline("
<div>foo</div>
") + + # When navigation reaches complete state, + # we should have received a browsingContext.domContentLoaded event + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + assert len(events) == 0 + + remove_listener() + + +async def test_subscribe(bidi_session, subscribe_events, inline, new_tab, wait_for_event): + await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT]) + + on_entry = wait_for_event(DOM_CONTENT_LOADED_EVENT) + url = inline("
<div>foo</div>
") + await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url) + event = await on_entry + + assert_navigation_info(event, {"context": new_tab["context"], "url": url}) + + +async def test_timestamp(bidi_session, current_time, subscribe_events, inline, new_tab, wait_for_event): + await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT]) + + time_start = await current_time() + + on_entry = wait_for_event(DOM_CONTENT_LOADED_EVENT) + url = inline("
<div>foo</div>
") + await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url) + event = await on_entry + + time_end = await current_time() + + assert_navigation_info( + event, + {"context": new_tab["context"], "timestamp": int_interval(time_start, time_end)} + ) + + +async def test_iframe(bidi_session, subscribe_events, new_tab, test_page, test_page_same_origin_frame): + events = [] + + async def on_event(method, data): + # Filter out events for about:blank to avoid browser differences + if data["url"] != 'about:blank': + events.append(data) + + remove_listener = bidi_session.add_event_listener( + DOM_CONTENT_LOADED_EVENT, on_event + ) + await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT]) + + await bidi_session.browsing_context.navigate( + context=new_tab["context"], url=test_page_same_origin_frame + ) + + wait = AsyncPoll( + bidi_session, message="Didn't receive dom content loaded events for frames" + ) + await wait.until(lambda _: len(events) >= 2) + assert len(events) == 2 + + contexts = await bidi_session.browsing_context.get_tree(root=new_tab["context"]) + + assert len(contexts) == 1 + root_info = contexts[0] + assert len(root_info["children"]) == 1 + child_info = root_info["children"][0] + + # The ordering of the domContentLoaded event is not guaranteed between the + # root page and the iframe, find the appropriate events in the current list. + first_is_root = events[0]["context"] == root_info["context"] + root_event = events[0] if first_is_root else events[1] + child_event = events[1] if first_is_root else events[0] + + assert_navigation_info( + root_event, + {"context": root_info["context"], "url": test_page_same_origin_frame} + ) + assert_navigation_info(child_event, {"context": child_info["context"], "url": test_page}) + + remove_listener() + + +@pytest.mark.parametrize("type_hint", ["tab", "window"]) +async def test_new_context(bidi_session, subscribe_events, wait_for_event, type_hint): + await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT]) + + on_entry = wait_for_event(DOM_CONTENT_LOADED_EVENT) + new_context = await bidi_session.browsing_context.create(type_hint=type_hint) + event = await on_entry + + assert_navigation_info(event, {"context": new_context["context"], "url": "about:blank"}) + + +async def test_document_write(bidi_session, subscribe_events, top_context, wait_for_event): + await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT]) + + on_entry = wait_for_event(DOM_CONTENT_LOADED_EVENT) + + await bidi_session.script.evaluate( + expression="""document.open(); document.write("
<h1>Replaced</h1>
"); document.close();""", + target=ContextTarget(top_context["context"]), + await_promise=False + ) + + event = await on_entry + assert_navigation_info(event, {"context": top_context["context"]}) + + +async def test_page_with_base_tag(bidi_session, subscribe_events, inline, new_tab, wait_for_event): + await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT]) + + on_entry = wait_for_event(DOM_CONTENT_LOADED_EVENT) + url = inline("""""") + await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url) + event = await on_entry + + assert_navigation_info(event, {"context": new_tab["context"], "url": url}) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/frames.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/frames.py new file mode 100644 index 0000000000..b1936d31d0 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/frames.py @@ -0,0 +1,90 @@ +import pytest + +from .. import assert_browsing_context + +pytestmark = pytest.mark.asyncio + + +async def test_multiple_frames( + bidi_session, + top_context, + test_page, + test_page2, + test_page_multiple_frames, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_multiple_frames, wait="complete" + ) + + # First retrieve all browsing contexts of the first tab + top_level_context_id = top_context["context"] + all_contexts = await bidi_session.browsing_context.get_tree(root=top_level_context_id) + + assert len(all_contexts) == 1 + root_info = all_contexts[0] + assert_browsing_context( + root_info, + top_level_context_id, + children=2, + parent=None, + url=test_page_multiple_frames, + ) + + child1_info = root_info["children"][0] + assert_browsing_context( + child1_info, + context=None, + children=0, + is_root=False, + parent=None, + url=test_page, + ) + assert child1_info["context"] != root_info["context"] + + child2_info = root_info["children"][1] + assert_browsing_context( + child2_info, + context=None, + children=0, + is_root=False, + parent=None, + url=test_page2, + ) + assert child2_info["context"] != root_info["context"] + assert child2_info["context"] != child1_info["context"] + + +async def test_cross_origin( + bidi_session, + top_context, + test_page_cross_origin, + test_page_cross_origin_frame, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_cross_origin_frame, wait="complete" + ) + + # First retrieve all browsing contexts of the first tab + top_level_context_id = top_context["context"] + all_contexts = await bidi_session.browsing_context.get_tree(root=top_level_context_id) + + assert len(all_contexts) == 1 + root_info = all_contexts[0] + assert_browsing_context( + root_info, + top_level_context_id, + children=1, + parent=None, + url=test_page_cross_origin_frame, + ) + + child1_info = root_info["children"][0] + assert_browsing_context( + child1_info, + context=None, + children=0, + is_root=False, + parent=None, + url=test_page_cross_origin, + ) + assert child1_info["context"] != root_info["context"] diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/invalid.py new 
file mode 100644 index 0000000000..dbc93155e9 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/invalid.py @@ -0,0 +1,27 @@ +import pytest +import webdriver.bidi.error as error + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", [False, "foo", {}, []]) +async def test_params_max_depth_invalid_type(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.get_tree(max_depth=value) + + +@pytest.mark.parametrize("value", [-1, 1.1, 2**53]) +async def test_params_max_depth_invalid_value(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.get_tree(max_depth=value) + + +@pytest.mark.parametrize("value", [False, 42, {}, []]) +async def test_params_root_invalid_type(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.get_tree(root=value) + + +async def test_params_root_invalid_value(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.browsing_context.get_tree(root="foo") diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/max_depth.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/max_depth.py new file mode 100644 index 0000000000..ca1d0edfa1 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/max_depth.py @@ -0,0 +1,121 @@ +import pytest + +from .. import assert_browsing_context + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", [0, 2**53 - 1]) +async def test_params_boundaries(bidi_session, value): + await bidi_session.browsing_context.get_tree(max_depth=value) + + +async def test_null( + bidi_session, + top_context, + test_page, + test_page_same_origin_frame, + test_page_nested_frames, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_nested_frames, wait="complete" + ) + + # Retrieve browsing contexts for first tab only + top_level_context_id = top_context["context"] + contexts = await bidi_session.browsing_context.get_tree(root=top_level_context_id) + + assert len(contexts) == 1 + root_info = contexts[0] + assert_browsing_context( + root_info, + top_level_context_id, + children=1, + parent=None, + url=test_page_nested_frames, + ) + + child1_info = root_info["children"][0] + assert_browsing_context( + child1_info, + context=None, + children=1, + is_root=False, + parent=None, + url=test_page_same_origin_frame, + ) + assert child1_info["context"] != root_info["context"] + + child2_info = child1_info["children"][0] + assert_browsing_context( + child2_info, + context=None, + children=0, + is_root=False, + parent=None, + url=test_page, + ) + assert child2_info["context"] != root_info["context"] + assert child2_info["context"] != child1_info["context"] + + +async def test_top_level_only(bidi_session, top_context, test_page_nested_frames): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_nested_frames, wait="complete" + ) + + # Retrieve browsing contexts for first tab only + top_level_context_id = top_context["context"] + contexts = await bidi_session.browsing_context.get_tree( + max_depth=0, + root=top_level_context_id + ) + + assert len(contexts) == 1 + root_info = contexts[0] + assert_browsing_context( + root_info, + top_level_context_id, + children=None, + parent=None, + url=test_page_nested_frames, 
+ ) + + +async def test_top_level_and_one_child( + bidi_session, + top_context, + test_page_nested_frames, + test_page_same_origin_frame, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_nested_frames, wait="complete" + ) + + # Retrieve browsing contexts for first tab only + top_level_context_id = top_context["context"] + contexts = await bidi_session.browsing_context.get_tree( + max_depth=1, + root=top_level_context_id + ) + + assert len(contexts) == 1 + root_info = contexts[0] + assert_browsing_context( + root_info, + top_level_context_id, + children=1, + parent=None, + url=test_page_nested_frames, + ) + + child1_info = root_info["children"][0] + assert_browsing_context( + child1_info, + context=None, + children=None, + is_root=False, + parent=None, + url=test_page_same_origin_frame, + ) + assert child1_info["context"] != root_info["context"] diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/root.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/root.py new file mode 100644 index 0000000000..74d11c6003 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/root.py @@ -0,0 +1,113 @@ +import pytest + +from .. import assert_browsing_context + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("type_hint", ["tab", "window"]) +async def test_null(bidi_session, top_context, test_page, type_hint): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page, wait="complete" + ) + + current_top_level_context_id = top_context["context"] + other_top_level_context = await bidi_session.browsing_context.create(type_hint=type_hint) + other_top_level_context_id = other_top_level_context["context"] + + # Retrieve all top-level browsing contexts + contexts = await bidi_session.browsing_context.get_tree(root=None) + + assert len(contexts) == 2 + if contexts[0]["context"] == current_top_level_context_id: + current_info = contexts[0] + other_info = contexts[1] + else: + current_info = contexts[1] + other_info = contexts[0] + + assert_browsing_context( + current_info, + current_top_level_context_id, + children=0, + parent=None, + url=test_page, + ) + + assert_browsing_context( + other_info, + other_top_level_context_id, + children=0, + parent=None, + url="about:blank", + ) + + +@pytest.mark.parametrize("type_hint", ["tab", "window"]) +async def test_top_level_context(bidi_session, top_context, test_page, type_hint): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page, wait="complete" + ) + + other_top_level_context = await bidi_session.browsing_context.create(type_hint=type_hint) + other_top_level_context_id = other_top_level_context["context"] + # Retrieve all browsing contexts of the newly opened tab/window + contexts = await bidi_session.browsing_context.get_tree(root=other_top_level_context_id) + + assert len(contexts) == 1 + assert_browsing_context( + contexts[0], + other_top_level_context_id, + children=0, + parent=None, + url="about:blank", + ) + + +async def test_child_context( + bidi_session, + top_context, + test_page_same_origin_frame, + test_page_nested_frames, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_nested_frames, wait="complete" + ) + + # First retrieve all browsing contexts for the first tab + top_level_context_id = top_context["context"] + all_contexts = await 
bidi_session.browsing_context.get_tree(root=top_level_context_id) + + assert len(all_contexts) == 1 + root_info = all_contexts[0] + assert_browsing_context( + root_info, + top_level_context_id, + children=1, + parent=None, + url=test_page_nested_frames, + ) + + child1_info = root_info["children"][0] + assert_browsing_context( + child1_info, + context=None, + children=1, + is_root=False, + parent=None, + url=test_page_same_origin_frame, + ) + + # Now retrieve all browsing contexts for the first browsing context child + child_contexts = await bidi_session.browsing_context.get_tree(root=child1_info["context"]) + + assert len(child_contexts) == 1 + assert_browsing_context( + child_contexts[0], + root_info["children"][0]["context"], + children=1, + parent=root_info["context"], + url=test_page_same_origin_frame, + ) + + assert child1_info["children"][0] == child_contexts[0]["children"][0] diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/load.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/load.py new file mode 100644 index 0000000000..b4b174818c --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/load.py @@ -0,0 +1,137 @@ +import pytest +from tests.support.sync import AsyncPoll +from webdriver.bidi.modules.script import ContextTarget +from webdriver.error import TimeoutException + +from ... import int_interval +from .. import assert_navigation_info + +pytestmark = pytest.mark.asyncio + +CONTEXT_LOAD_EVENT = "browsingContext.load" + + +async def test_not_unsubscribed(bidi_session, inline, top_context): + await bidi_session.session.subscribe(events=[CONTEXT_LOAD_EVENT]) + await bidi_session.session.unsubscribe(events=[CONTEXT_LOAD_EVENT]) + + # Track all received browsingContext.load events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener(CONTEXT_LOAD_EVENT, on_event) + + url = inline("
<div>foo</div>
") + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url + ) + + wait = AsyncPoll(bidi_session, timeout=0.5) + with pytest.raises(TimeoutException): + await wait.until(lambda _: len(events) > 0) + + remove_listener() + + +async def test_subscribe(bidi_session, subscribe_events, inline, new_tab, wait_for_event): + await subscribe_events(events=[CONTEXT_LOAD_EVENT]) + + on_entry = wait_for_event(CONTEXT_LOAD_EVENT) + url = inline("
<div>foo</div>
") + await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url) + event = await on_entry + + assert_navigation_info(event, {"context": new_tab["context"], "url": url}) + + +async def test_timestamp(bidi_session, current_time, subscribe_events, inline, new_tab, wait_for_event): + await subscribe_events(events=[CONTEXT_LOAD_EVENT]) + + time_start = await current_time() + + on_entry = wait_for_event(CONTEXT_LOAD_EVENT) + url = inline("
<div>foo</div>
") + await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url) + event = await on_entry + + time_end = await current_time() + + assert_navigation_info( + event, + {"context": new_tab["context"], "timestamp": int_interval(time_start, time_end)} + ) + + +async def test_iframe(bidi_session, subscribe_events, new_tab, test_page, test_page_same_origin_frame): + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener(CONTEXT_LOAD_EVENT, on_event) + await subscribe_events(events=[CONTEXT_LOAD_EVENT]) + + await bidi_session.browsing_context.navigate( + context=new_tab["context"], url=test_page_same_origin_frame + ) + + wait = AsyncPoll( + bidi_session, message="Didn't receive context load events for frames" + ) + await wait.until(lambda _: len(events) >= 2) + assert len(events) == 2 + + contexts = await bidi_session.browsing_context.get_tree(root=new_tab["context"]) + + assert len(contexts) == 1 + root_info = contexts[0] + assert len(root_info["children"]) == 1 + child_info = root_info["children"][0] + + # First load event comes from iframe + assert_navigation_info(events[0], {"context": child_info["context"], "url": test_page}) + assert_navigation_info( + events[1], + {"context": root_info["context"], "url": test_page_same_origin_frame} + ) + + remove_listener() + + +@pytest.mark.parametrize("type_hint", ["tab", "window"]) +async def test_new_context(bidi_session, subscribe_events, wait_for_event, type_hint): + await subscribe_events(events=[CONTEXT_LOAD_EVENT]) + + on_entry = wait_for_event(CONTEXT_LOAD_EVENT) + new_context = await bidi_session.browsing_context.create(type_hint=type_hint) + event = await on_entry + + assert_navigation_info(event, {"context": new_context["context"], "url": "about:blank"}) + + +async def test_document_write(bidi_session, subscribe_events, top_context, wait_for_event): + await subscribe_events(events=[CONTEXT_LOAD_EVENT]) + + on_entry = wait_for_event(CONTEXT_LOAD_EVENT) + + await bidi_session.script.evaluate( + expression="""document.open(); document.write("
<h1>Replaced</h1>
"); document.close();""", + target=ContextTarget(top_context["context"]), + await_promise=False + ) + + event = await on_entry + assert_navigation_info(event, {"context": top_context["context"]}) + + +async def test_page_with_base_tag(bidi_session, subscribe_events, inline, new_tab, wait_for_event): + await subscribe_events(events=[CONTEXT_LOAD_EVENT]) + + on_entry = wait_for_event(CONTEXT_LOAD_EVENT) + url = inline("""""") + await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url) + event = await on_entry + + assert_navigation_info(event, {"context": new_tab["context"], "url": url}) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/__init__.py new file mode 100644 index 0000000000..9b7d28f6da --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/__init__.py @@ -0,0 +1,25 @@ +import pytest + +from webdriver.bidi.error import UnknownErrorException + + +async def navigate_and_assert(bidi_session, context, url, wait="complete", expected_error=False): + if expected_error: + with pytest.raises(UnknownErrorException): + await bidi_session.browsing_context.navigate( + context=context['context'], url=url, wait=wait + ) + + else: + result = await bidi_session.browsing_context.navigate( + context=context['context'], url=url, wait=wait + ) + assert result["url"] == url + + contexts = await bidi_session.browsing_context.get_tree( + root=context['context'] + ) + assert len(contexts) == 1 + assert contexts[0]["url"] == url + + return contexts diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/about_blank.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/about_blank.py new file mode 100644 index 0000000000..1f6d4774ae --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/about_blank.py @@ -0,0 +1,33 @@ +import pytest + +from . import navigate_and_assert + +pytestmark = pytest.mark.asyncio + +PAGE_ABOUT_BLANK = "about:blank" +PAGE_EMPTY = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html" + + +async def test_navigate_from_single_page(bidi_session, new_tab, url): + await navigate_and_assert(bidi_session, new_tab, url(PAGE_EMPTY)) + await navigate_and_assert(bidi_session, new_tab, PAGE_ABOUT_BLANK) + + +async def test_navigate_from_frameset(bidi_session, inline, new_tab, url): + frame_url = url(PAGE_EMPTY) + url_before = inline(f"") + contexts = await navigate_and_assert(bidi_session, new_tab, url_before) + + assert len(contexts[0]["children"]) == 1 + frame = contexts[0]["children"][0] + assert frame["url"] == frame_start_url + + await navigate_and_assert(bidi_session, frame, PAGE_ABOUT_BLANK) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/data_url.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/data_url.py new file mode 100644 index 0000000000..8fd5695646 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/data_url.py @@ -0,0 +1,101 @@ +from urllib.parse import quote + +import pytest + +from . 
import navigate_and_assert + +pytestmark = pytest.mark.asyncio + + +def dataURL(doc, mime_type="text/html", charset="utf-8", isBase64=False): + encoding = "" + if charset: + encoding = f"charset={charset}" + elif isBase64: + encoding = "base64" + + return f"data:{mime_type};{encoding},{quote(doc)}" + + +HTML_BAR = dataURL("
<p>bar</p>
") +HTML_FOO = dataURL("
<p>foo</p>
") +IMG_BLACK_PIXEL = dataURL( + "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==", + "image/png", + None, + True, +) +IMG_RED_PIXEL = dataURL( + "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEX/TQBcNTh/AAAAAXRSTlPM0jRW/QAAAApJREFUeJxjYgAAAAYAAzY3fKgAAAAASUVORK5CYII=", + "image/png", + None, + True, +) +PAGE = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html" +TEXT_BAR = dataURL("bar", "text/plain") +TEXT_FOO = dataURL("foo", "text/plain") + + +def wrap_content_in_url(url, content): + """Check if content is not data url and wrap it in the url function""" + if content.startswith("data:"): + return content + return url(content) + + +@pytest.mark.parametrize( + "url_before, url_after", + [ + (PAGE, IMG_BLACK_PIXEL), + (IMG_BLACK_PIXEL, IMG_RED_PIXEL), + (IMG_BLACK_PIXEL, HTML_FOO), + (IMG_BLACK_PIXEL, PAGE), + (PAGE, HTML_FOO), + (HTML_FOO, TEXT_FOO), + (HTML_FOO, HTML_BAR), + (HTML_FOO, PAGE), + (PAGE, TEXT_FOO), + (TEXT_FOO, TEXT_BAR), + (TEXT_FOO, IMG_BLACK_PIXEL), + (TEXT_FOO, PAGE), + ], + ids=[ + "document to data:image", + "data:image to data:image", + "data:image to data:html", + "data:image to document", + "document to data:html", + "data:html to data:html", + "data:html to data:text", + "data:html to document", + "document to data:text", + "data:text to data:text", + "data:text to data:image", + "data:text to document", + ], +) +async def test_navigate_from_single_page( + bidi_session, new_tab, url, url_before, url_after +): + await navigate_and_assert( + bidi_session, + new_tab, + wrap_content_in_url(url, url_before), + ) + await navigate_and_assert( + bidi_session, + new_tab, + wrap_content_in_url(url, url_after), + ) + + +async def test_navigate_in_iframe(bidi_session, inline, new_tab): + frame_start_url = inline("frame") + url_before = inline(f"") + contexts = await navigate_and_assert(bidi_session, new_tab, url_before) + + assert len(contexts[0]["children"]) == 1 + frame = contexts[0]["children"][0] + assert frame["url"] == frame_start_url + + await navigate_and_assert(bidi_session, frame, HTML_BAR) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/error.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/error.py new file mode 100644 index 0000000000..b5d9a9d8fe --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/error.py @@ -0,0 +1,22 @@ +import pytest + +from . import navigate_and_assert + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "url", + [ + "thisprotocoldoesnotexist://", + "http://doesnotexist.localhost/", + "http://localhost:0", + ], + ids=[ + "protocol", + "host", + "port", + ] +) +async def test_invalid_address(bidi_session, new_tab, url): + await navigate_and_assert(bidi_session, new_tab, url, expected_error=True) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/frame.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/frame.py new file mode 100644 index 0000000000..2c2131b6ee --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/frame.py @@ -0,0 +1,59 @@ +import pytest + +from . import navigate_and_assert + +pytestmark = pytest.mark.asyncio + +PAGE_CONTENT = "
<div>foo</div>
" + + +@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"]) +async def test_origin(bidi_session, new_tab, inline, domain): + frame_start_url = inline("frame") + url_before = inline(f"", domain=domain) + contexts = await navigate_and_assert(bidi_session, new_tab, url_before) + + assert len(contexts[0]["children"]) == 1 + frame = contexts[0]["children"][0] + assert frame["url"] == frame_start_url + + await navigate_and_assert(bidi_session, frame, inline(PAGE_CONTENT)) + + +async def test_multiple_frames( + bidi_session, new_tab, test_page_multiple_frames, test_page, test_page2, inline +): + contexts = await navigate_and_assert( + bidi_session, new_tab, test_page_multiple_frames + ) + + assert len(contexts[0]["children"]) == 2 + frame = contexts[0]["children"][0] + assert frame["url"] == test_page + + await navigate_and_assert(bidi_session, frame, inline(PAGE_CONTENT)) + + # Make sure that the sesond frame hasn't been navigated + contexts = await bidi_session.browsing_context.get_tree(root=new_tab["context"]) + assert contexts[0]["children"][1]["url"] == test_page2 + + +async def test_nested_frames( + bidi_session, + new_tab, + inline, + test_page_nested_frames, + test_page_same_origin_frame, + test_page, +): + contexts = await navigate_and_assert(bidi_session, new_tab, test_page_nested_frames) + + assert len(contexts[0]["children"]) == 1 + frame_level_1 = contexts[0]["children"][0] + assert frame_level_1["url"] == test_page_same_origin_frame + + assert len(frame_level_1["children"]) == 1 + frame_level_2 = frame_level_1["children"][0] + assert frame_level_2["url"] == test_page + + await navigate_and_assert(bidi_session, frame_level_2, inline(PAGE_CONTENT)) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/hash.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/hash.py new file mode 100644 index 0000000000..d4862a6201 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/hash.py @@ -0,0 +1,62 @@ +import pytest + +from . 
import navigate_and_assert + +pytestmark = pytest.mark.asyncio + +PAGE_EMPTY = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html" +PAGE_EMPTY_WITH_HASH_FOO = f"{PAGE_EMPTY}#foo" +PAGE_OTHER = "/webdriver/tests/bidi/browsing_context/navigate/support/other.html" + + +@pytest.mark.parametrize( + "hash_before, hash_after", + [ + ("", "#foo"), + ("#foo", "#bar"), + ("#foo", "#foo"), + ("#bar", ""), + ], + ids=[ + "without hash to with hash", + "with different hashes", + "with identical hashes", + "with hash to without hash", + ], +) +async def test_navigate_in_the_same_document( + bidi_session, new_tab, url, hash_before, hash_after +): + await navigate_and_assert(bidi_session, new_tab, url(PAGE_EMPTY + hash_before)) + await navigate_and_assert(bidi_session, new_tab, url(PAGE_EMPTY + hash_after)) + + +@pytest.mark.parametrize( + "url_before, url_after", + [ + (PAGE_EMPTY_WITH_HASH_FOO, f"{PAGE_OTHER}#foo"), + (PAGE_EMPTY_WITH_HASH_FOO, f"{PAGE_OTHER}#bar"), + ], + ids=[ + "with identical hashes", + "with different hashes", + ], +) +async def test_navigate_different_documents( + bidi_session, new_tab, url, url_before, url_after +): + await navigate_and_assert(bidi_session, new_tab, url(url_before)) + await navigate_and_assert(bidi_session, new_tab, url(url_after)) + + +async def test_navigate_in_iframe(bidi_session, inline, new_tab): + frame_start_url = inline("frame") + url_before = inline(f"") + contexts = await navigate_and_assert(bidi_session, new_tab, url_before) + + assert len(contexts[0]["children"]) == 1 + frame = contexts[0]["children"][0] + assert frame["url"] == frame_start_url + + url_after = f"{frame_start_url}#foo" + await navigate_and_assert(bidi_session, frame, url_after) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/image.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/image.py new file mode 100644 index 0000000000..b52ea9787c --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/image.py @@ -0,0 +1,56 @@ +import pytest + +from . 
import navigate_and_assert + +pytestmark = pytest.mark.asyncio + +PAGE_EMPTY = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html" +PNG_BLACK_DOT = "/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.png" +PNG_RED_DOT = "/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.png" +SVG = "/webdriver/tests/bidi/browsing_context/navigate/support/other.svg" + + +@pytest.mark.parametrize( + "url_before, url_after", + [ + (PAGE_EMPTY, SVG), + (SVG, PAGE_EMPTY), + (PAGE_EMPTY, PNG_BLACK_DOT), + (PNG_BLACK_DOT, PNG_RED_DOT), + (PNG_RED_DOT, SVG), + (PNG_BLACK_DOT, PAGE_EMPTY), + ], + ids=[ + "document to svg", + "svg to document", + "document to png", + "png to png", + "png to svg", + "png to document", + ], +) +async def test_navigate_between_img_and_html( + bidi_session, new_tab, url, url_before, url_after +): + await navigate_and_assert(bidi_session, new_tab, url(url_before)) + await navigate_and_assert(bidi_session, new_tab, url(url_after)) + + +@pytest.mark.parametrize( + "img", + [SVG, PNG_BLACK_DOT], + ids=[ + "to svg", + "to png", + ], +) +async def test_navigate_in_iframe(bidi_session, new_tab, inline, url, img): + frame_start_url = inline("frame") + url_before = inline(f"") + contexts = await navigate_and_assert(bidi_session, new_tab, url_before) + + assert len(contexts[0]["children"]) == 1 + frame = contexts[0]["children"][0] + assert frame["url"] == frame_start_url + + await navigate_and_assert(bidi_session, frame, url(img)) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/invalid.py new file mode 100644 index 0000000000..3ea45f0666 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/invalid.py @@ -0,0 +1,52 @@ +import pytest +import webdriver.bidi.error as error + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", [None, False, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, inline, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.navigate( + context=value, url=inline("
<p>
foo") + ) + + +@pytest.mark.parametrize("value", ["", "somestring"]) +async def test_params_context_invalid_value(bidi_session, inline, value): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.browsing_context.navigate( + context=value, url=inline("
<p>
foo") + ) + + +@pytest.mark.parametrize("value", [None, False, 42, {}, []]) +async def test_params_url_invalid_type(bidi_session, new_tab, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.navigate( + context=new_tab["context"], url=value + ) + + +@pytest.mark.parametrize("value", ["http://:invalid", "http://#invalid"]) +async def test_params_url_invalid_value(bidi_session, new_tab, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.navigate( + context=new_tab["context"], url=value + ) + + +@pytest.mark.parametrize("value", [False, 42, {}, []]) +async def test_params_wait_invalid_type(bidi_session, inline, new_tab, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.navigate( + context=new_tab["context"], url=inline("
<p>
bar"), wait=value + ) + + +@pytest.mark.parametrize("value", ["", "somestring"]) +async def test_params_wait_invalid_value(bidi_session, inline, new_tab, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.navigate( + context=new_tab["context"], url=inline("
<p>
bar"), wait=value + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/navigate.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/navigate.py new file mode 100644 index 0000000000..a35f2728ef --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/navigate.py @@ -0,0 +1,83 @@ +import asyncio + +import pytest + +from . import navigate_and_assert + +pytestmark = pytest.mark.asyncio + + +async def test_payload(bidi_session, inline, new_tab): + url = inline("
<div>foo</div>
") + result = await bidi_session.browsing_context.navigate( + context=new_tab["context"], url=url + ) + + assert "navigation" in result + assert result["url"] == url + + +async def test_interactive_simultaneous_navigation(bidi_session, inline, new_tab): + frame1_start_url = inline("frame1") + frame2_start_url = inline("frame2") + + url = inline( + f"" + ) + + contexts = await navigate_and_assert(bidi_session, new_tab, url) + assert len(contexts[0]["children"]) == 2 + + frame1_context_id = contexts[0]["children"][0]["context"] + frame2_context_id = contexts[0]["children"][1]["context"] + + # The goal here is to navigate both iframes in parallel, and to use the + # interactive wait condition for both. + # Make sure that monitoring the DOMContentLoaded event for one frame does + # prevent monitoring it for the other frame. + img_url = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg" + script_url = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.js" + # frame1 also has a slow loading image so that it won't reach a complete + # navigation, and we can make sure we resolved with the interactive state. + frame1_url = inline( + f"""frame1_new + + """ + ) + frame2_url = inline( + f"frame2_new" + ) + + frame1_task = asyncio.ensure_future( + bidi_session.browsing_context.navigate( + context=frame1_context_id, url=frame1_url, wait="interactive" + ) + ) + + frame2_result = await bidi_session.browsing_context.navigate( + context=frame2_context_id, url=frame2_url, wait="interactive" + ) + assert frame2_result["url"] == frame2_url + + # The "interactive" navigation should resolve before the 5 seconds timeout. + await asyncio.wait_for(frame1_task, timeout=5) + + frame1_result = frame1_task.result() + assert frame1_result["url"] == frame1_url + + contexts = await bidi_session.browsing_context.get_tree(root=new_tab["context"]) + assert contexts[0]["children"][0]["url"] == frame1_url + assert contexts[0]["children"][1]["url"] == frame2_url + + +async def test_relative_url(bidi_session, new_tab, url): + url_before = url( + "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html" + ) + + # Navigate to page1 with wait=interactive to make sure the document's base URI + # was updated. 
+ await navigate_and_assert(bidi_session, new_tab, url_before, "interactive") + + url_after = url_before.replace("empty.html", "other.html") + await navigate_and_assert(bidi_session, new_tab, url_after, "interactive") diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.png b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.png new file mode 100644 index 0000000000..613754cfaf Binary files /dev/null and b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.png differ diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.html b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.html new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.js b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.js new file mode 100644 index 0000000000..3918c74e44 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.js @@ -0,0 +1 @@ +"use strict"; diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg new file mode 100644 index 0000000000..e0af766e8f --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg @@ -0,0 +1,2 @@ + + diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.html b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.html new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.svg b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.svg new file mode 100644 index 0000000000..7c20a99a4b --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.svg @@ -0,0 +1,3 @@ + + + diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.png b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.png new file mode 100644 index 0000000000..c5916f2897 Binary files /dev/null and b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.png differ diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/wait.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/wait.py new file mode 100644 index 0000000000..9a0b14e755 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/wait.py @@ -0,0 +1,98 @@ +import pytest +import asyncio + +pytestmark = pytest.mark.asyncio + + +async def wait_for_navigation(bidi_session, context, url, wait, expect_timeout): + # Ultimately, "interactive" and "complete" should support a timeout argument. + # See https://github.com/w3c/webdriver-bidi/issues/188. 
+ if expect_timeout: + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for( + asyncio.shield(bidi_session.browsing_context.navigate( + context=context, url=url, wait=wait + )), + timeout=1, + ) + else: + await bidi_session.browsing_context.navigate( + context=context, url=url, wait=wait + ) + + +@pytest.mark.parametrize("value", ["none", "interactive", "complete"]) +async def test_expected_url(bidi_session, inline, new_tab, value): + url = inline("
<div>foo</div>
") + result = await bidi_session.browsing_context.navigate( + context=new_tab["context"], url=url, wait=value + ) + assert result["url"] == url + if value != "none": + contexts = await bidi_session.browsing_context.get_tree( + root=new_tab["context"], max_depth=0 + ) + assert contexts[0]["url"] == url + + +@pytest.mark.parametrize( + "wait, expect_timeout", + [ + ("none", False), + ("interactive", False), + ("complete", True), + ], +) +async def test_slow_image_blocks_load(bidi_session, inline, new_tab, wait, expect_timeout): + script_url = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg" + url = inline(f"") + + await wait_for_navigation(bidi_session, new_tab["context"], url, wait, expect_timeout) + + # We cannot assert the URL for "none" by definition, and for "complete", since + # we expect a timeout. For the timeout case, the wait_for_navigation helper will + # resume after 1 second, there is no guarantee that the URL has been updated. + if wait == "interactive": + contexts = await bidi_session.browsing_context.get_tree( + root=new_tab["context"], max_depth=0 + ) + assert contexts[0]["url"] == url + + +@pytest.mark.parametrize( + "wait, expect_timeout", + [ + ("none", False), + ("interactive", True), + ("complete", True), + ], +) +async def test_slow_page(bidi_session, new_tab, url, wait, expect_timeout): + page_url = url( + "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html?pipe=trickle(d10)" + ) + + await wait_for_navigation(bidi_session, new_tab["context"], page_url, wait, expect_timeout) + + # Note that we cannot assert the top context url here, because the navigation + # is blocked on the initial url for this test case. + + +@pytest.mark.parametrize( + "wait, expect_timeout", + [ + ("none", False), + ("interactive", True), + ("complete", True), + ], +) +async def test_slow_script_blocks_domContentLoaded(bidi_session, inline, new_tab, wait, expect_timeout): + script_url = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.js" + url = inline(f"") + + await wait_for_navigation(bidi_session, new_tab["context"], url, wait, expect_timeout) + + # In theory we could also assert the top context URL has been updated here, + # but since we expect both "interactive" and "complete" to timeout the + # wait_for_navigation helper will resume arbitrarily after 1 second, and + # there is no guarantee that the URL has been updated. 
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/background.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/background.py new file mode 100644 index 0000000000..b527aef382 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/background.py @@ -0,0 +1,56 @@ +import base64 +import pytest + +from tests.support.image import px_to_cm +from tests.support.pdf import assert_pdf + + +pytestmark = pytest.mark.asyncio + + +INLINE_BACKGROUND_RENDERING_TEST_CONTENT = """ + +""" + +BLACK_DOT_PNG = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVQIW2NgYGD4DwABBAEAwS2OUAAAAABJRU5ErkJggg==" +WHITE_DOT_PNG = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAC0lEQVQIW2P4DwQACfsD/Z8fLAAAAAAASUVORK5CYII=" + + +@pytest.mark.parametrize( + "print_with_background, expected_image", + [ + (None, WHITE_DOT_PNG), + (True, BLACK_DOT_PNG), + (False, WHITE_DOT_PNG), + ], +) +async def test_background( + bidi_session, + top_context, + inline, + compare_png_bidi, + render_pdf_to_png_bidi, + print_with_background, + expected_image, +): + page = inline(INLINE_BACKGROUND_RENDERING_TEST_CONTENT) + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=page, wait="complete" + ) + + print_value = await bidi_session.browsing_context.print( + context=top_context["context"], + background=print_with_background, + margin={"top": 0, "bottom": 0, "right": 0, "left": 0}, + page={"width": px_to_cm(1), "height": px_to_cm(1)}, + ) + + assert_pdf(print_value) + + png = await render_pdf_to_png_bidi(print_value) + comparison = await compare_png_bidi(png, base64.b64decode(expected_image)) + assert comparison.equal() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/context.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/context.py new file mode 100644 index 0000000000..f8074b71b4 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/context.py @@ -0,0 +1,61 @@ +import pytest + +pytestmark = pytest.mark.asyncio + + +async def test_context(bidi_session, top_context, inline, assert_pdf_content): + text = "Test" + url = inline(text) + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + value = await bidi_session.browsing_context.print(context=top_context["context"]) + + await assert_pdf_content(value, [{"type": "string", "value": text}]) + + +async def test_page_with_iframe( + bidi_session, top_context, inline, iframe, assert_pdf_content +): + text = "Test" + iframe_content = "Iframe" + url = inline(f"{text}
{iframe(iframe_content)}") + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + whole_page_value = await bidi_session.browsing_context.print( + context=top_context["context"] + ) + + await assert_pdf_content( + whole_page_value, [{"type": "string", "value": text + iframe_content}] + ) + + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + frame_context = contexts[0]["children"][0] + + frame_value = await bidi_session.browsing_context.print( + context=frame_context["context"] + ) + + await assert_pdf_content(frame_value, [{"type": "string", "value": iframe_content}]) + + +@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"]) +async def test_context_origin( + bidi_session, top_context, inline, iframe, assert_pdf_content, domain +): + iframe_content = "Iframe" + url = inline(f"{iframe(iframe_content, domain=domain)}") + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + frame_context = contexts[0]["children"][0] + + value = await bidi_session.browsing_context.print(context=frame_context["context"]) + + await assert_pdf_content(value, [{"type": "string", "value": iframe_content}]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/invalid.py new file mode 100644 index 0000000000..114aeb62dc --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/invalid.py @@ -0,0 +1,197 @@ +import pytest +import webdriver.bidi.error as error + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("context", [None, False, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print(context=context) + + +async def test_params_context_invalid_value(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.browsing_context.print(context="_invalid_") + + +async def test_params_context_closed(bidi_session): + new_tab = await bidi_session.browsing_context.create(type_hint="tab") + await bidi_session.browsing_context.close(context=new_tab["context"]) + + # Try to print the closed context + with pytest.raises(error.NoSuchFrameException): + await bidi_session.browsing_context.print(context=new_tab["context"]) + + +@pytest.mark.parametrize("background", ["foo", 42, {}, []]) +async def test_params_background_invalid_type(bidi_session, top_context, background): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], background=background + ) + + +@pytest.mark.parametrize( + "margin", + [ + False, + "foo", + 42, + [], + {"top": False}, + {"top": "foo"}, + {"top": []}, + {"top": {}}, + {"bottom": False}, + {"bottom": "foo"}, + {"bottom": []}, + {"bottom": {}}, + {"left": False}, + {"left": "foo"}, + {"left": []}, + {"left": {}}, + {"right": False}, + {"right": "foo"}, + {"right": []}, + {"right": {}}, + ], +) +async def test_params_margin_invalid_type(bidi_session, top_context, margin): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], margin=margin + ) + + +@pytest.mark.parametrize( + 
"margin", + [ + {"top": -0.1}, + {"bottom": -0.1}, + {"left": -0.1}, + {"right": -0.1}, + ], +) +async def test_params_margin_invalid_value(bidi_session, top_context, margin): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], margin=margin + ) + + +@pytest.mark.parametrize("orientation", [False, 42, {}, []]) +async def test_params_orientation_invalid_type(bidi_session, top_context, orientation): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], orientation=orientation + ) + + +async def test_params_orientation_invalid_value(bidi_session, top_context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], orientation="foo" + ) + + +@pytest.mark.parametrize( + "page", + [ + False, + "foo", + 42, + [], + {"height": False}, + {"height": "foo"}, + {"height": []}, + {"height": {}}, + {"width": False}, + {"width": "foo"}, + {"width": []}, + {"width": {}}, + ], +) +async def test_params_page_invalid_type(bidi_session, top_context, page): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], page=page + ) + + +@pytest.mark.parametrize( + "page", + [ + {"height": -1}, + {"width": -1}, + ], +) +async def test_params_page_invalid_value(bidi_session, top_context, page): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], page=page + ) + + +@pytest.mark.parametrize( + "page_ranges", + [ + False, + "foo", + 42, + {}, + [None], + [False], + [[]], + [{}], + ["1-2", {}], + ], +) +async def test_params_page_ranges_invalid_type(bidi_session, top_context, page_ranges): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], page_ranges=page_ranges + ) + +@pytest.mark.parametrize( + "page_ranges", + [ + [4.2], + ["4.2"], + ["3-2"], + ["a-2"], + ["1:2"], + ["1-2-3"], + ], +) +async def test_params_page_ranges_invalid_value(bidi_session, top_context, page_ranges): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], page_ranges=page_ranges + ) + + +@pytest.mark.parametrize("scale", [False, "foo", {}, []]) +async def test_params_scale_invalid_type(bidi_session, top_context, scale): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], scale=scale + ) + + +@pytest.mark.parametrize("scale", [-1, 0.09, 2.01, 42]) +async def test_params_scale_invalid_value(bidi_session, top_context, scale): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], scale=scale + ) + + +@pytest.mark.parametrize("shrink_to_fit", ["foo", 42, {}, []]) +async def test_params_shrink_to_fit_invalid_type( + bidi_session, top_context, shrink_to_fit +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.print( + context=top_context["context"], shrink_to_fit=shrink_to_fit + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/margin.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/margin.py new file mode 100644 index 0000000000..8ca1ef7077 --- 
/dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/margin.py @@ -0,0 +1,148 @@ +# META: timeout=long +import pytest + +pytestmark = pytest.mark.asyncio + + +def get_content(css=""): + return f""" +
+ + """ + + +@pytest.mark.parametrize( + "margin, reference_css, css", + [ + ( + {"top": 2.54}, + "margin-top: 1.54cm;", + "", + ), + ( + {"left": 2.54}, + "margin-left: 1.54cm;", + "", + ), + ( + {"right": 2.54}, + "margin-right: 1.54cm;", + "", + ), + ( + {"bottom": 2.54}, + "height: 24.4cm;", + "height: 26.94cm;", + ), + ], + ids=[ + "top", + "left", + "right", + "bottom", + ], +) +async def test_margin_default( + bidi_session, + top_context, + inline, + assert_pdf_image, + margin, + reference_css, + css, +): + default_content_page = inline(get_content(css)) + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=default_content_page, wait="complete" + ) + value_with_margin = await bidi_session.browsing_context.print( + context=top_context["context"], + shrink_to_fit=False, + background=True, + margin=margin, + ) + + # Compare a page with default margin (1.0cm) + css margin + # with a page with extended print margin. + await assert_pdf_image(value_with_margin, get_content(reference_css), True) + + +@pytest.mark.parametrize( + "margin", + [ + {"top": 27.94}, + {"left": 21.59}, + {"right": 21.59}, + {"bottom": 27.94}, + {"top": 27.94, "left": 21.59, "right": 21.59, "bottom": 27.94}, + ], + ids=[ + "top", + "left", + "right", + "bottom", + "all", + ], +) +async def test_margin_same_as_page_dimension( + bidi_session, + top_context, + inline, + assert_pdf_content, + margin, +): + page = inline("Text") + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=page, wait="complete" + ) + print_value = await bidi_session.browsing_context.print( + context=top_context["context"], + shrink_to_fit=False, + margin=margin, + ) + + # Check that content is out of page dimensions. + await assert_pdf_content(print_value, [{"type": "string", "value": ""}]) + + +@pytest.mark.parametrize( + "margin", + [ + {}, + {"top": 0, "left": 0, "right": 0, "bottom": 0}, + {"top": 2, "left": 2, "right": 2, "bottom": 2} + ], + ids=[ + "default", + "0", + "2" + ], +) +async def test_margin_does_not_affect_page_size( + bidi_session, + top_context, + inline, + assert_pdf_dimensions, + margin +): + url = inline("") + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + value = await bidi_session.browsing_context.print( + context=top_context["context"], + margin=margin + ) + + # Check that margins don't affect page dimencions and equal in this case defaults. 
+ await assert_pdf_dimensions(value, {"width": 21.59, "height": 27.94}) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/orientation.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/orientation.py new file mode 100644 index 0000000000..4dee803bc8 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/orientation.py @@ -0,0 +1,43 @@ +import pytest + +from tests.support.image import png_dimensions +from tests.support.pdf import assert_pdf + + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "orientation_value, is_portrait", + [ + (None, True), + ("portrait", True), + ("landscape", False), + ], + ids=[ + "default", + "portrait", + "landscape", + ], +) +async def test_orientation( + bidi_session, + top_context, + inline, + render_pdf_to_png_bidi, + orientation_value, + is_portrait, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=inline(""), wait="complete" + ) + print_value = await bidi_session.browsing_context.print( + context=top_context["context"], orientation=orientation_value + ) + + assert_pdf(print_value) + + png = await render_pdf_to_png_bidi(print_value) + width, height = png_dimensions(png) + + assert (width < height) == is_portrait diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/page.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/page.py new file mode 100644 index 0000000000..b66fb9881e --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/page.py @@ -0,0 +1,39 @@ +import pytest + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "page, orientation, expected_dimensions", + [ + (None, "portrait", {"width": 21.59, "height": 27.94}), + ({}, "portrait",{"width": 21.59, "height": 27.94}), + ({"width": 4.5}, "portrait", {"width": 4.5, "height": 27.94}), + ({"height": 23}, "portrait", {"width": 21.59, "height": 23}), + ({"width": 4.5, "height": 12}, "portrait", {"width": 4.5, "height": 12}), + ({"height": 12}, "portrait", {"width": 21.59, "height": 12}), + (None, "landscape", {"width": 27.94, "height": 21.59}), + ({}, "landscape",{"width": 27.94, "height": 21.59}), + ({"width": 4.5}, "landscape", {"width": 27.94, "height": 4.5}), + ({"height": 23}, "landscape", {"width": 23, "height": 21.59}), + ({"width": 4.5, "height": 12}, "landscape", {"width": 12, "height": 4.5}), + ({"height": 12}, "landscape", {"width": 12, "height": 21.59}), + ], +) +async def test_page( + bidi_session, + top_context, + inline, + assert_pdf_dimensions, + page, + orientation, + expected_dimensions, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=inline(""), wait="complete" + ) + value = await bidi_session.browsing_context.print( + context=top_context["context"], page=page, orientation=orientation + ) + + await assert_pdf_dimensions(value, expected_dimensions) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/page_ranges.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/page_ranges.py new file mode 100644 index 0000000000..64843d3496 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/page_ranges.py @@ -0,0 +1,131 @@ +# META: timeout=long +import pytest + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "ranges,expected", + [ + ( + ["2-4"], + [ + {"type": "string", "value": "Page 2"}, + 
{"type": "string", "value": "Page 3"}, + {"type": "string", "value": "Page 4"}, + ], + ), + ( + ["2-4", "2-3"], + [ + {"type": "string", "value": "Page 2"}, + {"type": "string", "value": "Page 3"}, + {"type": "string", "value": "Page 4"}, + ], + ), + ( + ["2-4", "3-5"], + [ + {"type": "string", "value": "Page 2"}, + {"type": "string", "value": "Page 3"}, + {"type": "string", "value": "Page 4"}, + {"type": "string", "value": "Page 5"}, + ], + ), + ( + ["9-"], + [ + {"type": "string", "value": "Page 9"}, + {"type": "string", "value": "Page 10"}, + ], + ), + ( + ["-2"], + [ + {"type": "string", "value": "Page 1"}, + {"type": "string", "value": "Page 2"}, + ], + ), + ( + [7], + [ + {"type": "string", "value": "Page 7"}, + ], + ), + ( + ["7"], + [ + {"type": "string", "value": "Page 7"}, + ], + ), + ( + ["-2", "9-", "7"], + [ + {"type": "string", "value": "Page 1"}, + {"type": "string", "value": "Page 2"}, + {"type": "string", "value": "Page 7"}, + {"type": "string", "value": "Page 9"}, + {"type": "string", "value": "Page 10"}, + ], + ), + ( + ["-5", "2-"], + [ + {"type": "string", "value": "Page 1"}, + {"type": "string", "value": "Page 2"}, + {"type": "string", "value": "Page 3"}, + {"type": "string", "value": "Page 4"}, + {"type": "string", "value": "Page 5"}, + {"type": "string", "value": "Page 6"}, + {"type": "string", "value": "Page 7"}, + {"type": "string", "value": "Page 8"}, + {"type": "string", "value": "Page 9"}, + {"type": "string", "value": "Page 10"}, + ], + ), + ( + [], + [ + {"type": "string", "value": "Page 1"}, + {"type": "string", "value": "Page 2"}, + {"type": "string", "value": "Page 3"}, + {"type": "string", "value": "Page 4"}, + {"type": "string", "value": "Page 5"}, + {"type": "string", "value": "Page 6"}, + {"type": "string", "value": "Page 7"}, + {"type": "string", "value": "Page 8"}, + {"type": "string", "value": "Page 9"}, + {"type": "string", "value": "Page 10"}, + ], + ), + ], +) +async def test_page_ranges_document( + bidi_session, inline, top_context, assert_pdf_content, ranges, expected +): + url = inline( + """ + + +
<div>Page 1</div>
+<div>Page 2</div>
+<div>Page 3</div>
+<div>Page 4</div>
+<div>Page 5</div>
+<div>Page 6</div>
+<div>Page 7</div>
+<div>Page 8</div>
+<div>Page 9</div>
+<div>Page 10</div>
""" + ) + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + value = await bidi_session.browsing_context.print( + context=top_context["context"], page_ranges=ranges + ) + + await assert_pdf_content(value, expected) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/scale.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/scale.py new file mode 100644 index 0000000000..bffc09af67 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/scale.py @@ -0,0 +1,57 @@ +import pytest + +pytestmark = pytest.mark.asyncio + + +def get_content(css=""): + return f""" +
+ + """ + + +@pytest.mark.parametrize( + "scale, reference_css", + [ + (None, "width: 100px; height: 100px;"), + (2, "width: 200px; height: 200px;"), + (0.5, "width: 50px; height: 50px;"), + ], + ids=["default", "twice", "half"], +) +async def test_scale( + bidi_session, + top_context, + inline, + assert_pdf_image, + scale, + reference_css, +): + not_scaled_content = get_content("width: 100px; height: 100px;") + default_content_page = inline(not_scaled_content) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=default_content_page, wait="complete" + ) + + scaled_print_value = await bidi_session.browsing_context.print( + context=top_context["context"], + shrink_to_fit=False, + scale=scale, + background=True, + ) + + # Check that pdf scaled with print command is equal pdf of scaled with css content. + await assert_pdf_image(scaled_print_value, get_content(reference_css), True) + # If scale is not None, check that pdf scaled with print command is not equal pdf with not scaled content. + if scale is not None: + await assert_pdf_image(scaled_print_value, not_scaled_content, False) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/shrink_to_fit.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/shrink_to_fit.py new file mode 100644 index 0000000000..db355280de --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/print/shrink_to_fit.py @@ -0,0 +1,50 @@ +import pytest + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "shrink_to_fit, pages_content", + [ + (None, [{"type": "string", "value": "Block 1Block 2Block 3Block 4"}]), + (True, [{"type": "string", "value": "Block 1Block 2Block 3Block 4"}]), + ( + False, + [ + {"type": "string", "value": "Block 1Block 2Block 3"}, + {"type": "string", "value": "Block 4"}, + ], + ), + ], + ids=["default", "True", "False"], +) +async def test_shrink_to_fit( + bidi_session, + top_context, + inline, + assert_pdf_content, + shrink_to_fit, + pages_content, +): + url = inline( + """ + +
<div>Block 1</div>
+<div>Block 2</div>
+<div>Block 3</div>
+<div>Block 4</div>
+ """ + ) + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + value = await bidi_session.browsing_context.print( + context=top_context["context"], shrink_to_fit=shrink_to_fit + ) + + await assert_pdf_content(value, pages_content) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/reload/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/reload/invalid.py new file mode 100644 index 0000000000..35be21ef19 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/reload/invalid.py @@ -0,0 +1,37 @@ +import pytest +import webdriver.bidi.error as error + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", [None, False, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.reload(context=value) + + +@pytest.mark.parametrize("value", ["", "somestring"]) +async def test_params_context_invalid_value(bidi_session, value): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.browsing_context.reload(context=value) + + +@pytest.mark.parametrize("value", ["", 42, {}, []]) +async def test_params_ignore_cache_invalid_type(bidi_session, new_tab, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.reload(context=new_tab["context"], + ignore_cache=value) + + +@pytest.mark.parametrize("value", [False, 42, {}, []]) +async def test_params_wait_invalid_type(bidi_session, new_tab, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.reload(context=new_tab["context"], + wait=value) + + +@pytest.mark.parametrize("value", ["", "somestring"]) +async def test_params_wait_invalid_value(bidi_session, new_tab, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.browsing_context.reload(context=new_tab["context"], + wait=value) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/errors/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/errors/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/errors/errors.py b/testing/web-platform/tests/webdriver/tests/bidi/errors/errors.py new file mode 100644 index 0000000000..0d6e9ea0e1 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/errors/errors.py @@ -0,0 +1,16 @@ +import pytest + +from webdriver.bidi.error import UnknownCommandException + + +@pytest.mark.asyncio +@pytest.mark.parametrize("module_name, command_name", [ + ("invalidmodule", "somecommand"), + ("session", "wrongcommand"), +], ids=[ + 'invalid module', + 'invalid command name', +]) +async def test_unknown_command(bidi_session, send_blocking_command, module_name, command_name): + with pytest.raises(UnknownCommandException): + await send_blocking_command(f"{module_name}.{command_name}", {}) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/input/__init__.py new file mode 100644 index 0000000000..7aa3355929 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/__init__.py @@ -0,0 +1,38 @@ +import json + +from webdriver.bidi.modules.script import ContextTarget + + +async def get_events(bidi_session, context): + """Return list of key events recorded on the test_actions.html page.""" + events_str = await 
bidi_session.script.evaluate( + expression="JSON.stringify(allEvents.events)", + target=ContextTarget(context), + await_promise=False, + ) + events = json.loads(events_str["value"]) + + # `key` values in `allEvents` may be escaped (see `escapeSurrogateHalf` in + # test_actions.html), so this converts them back into unicode literals. + for e in events: + # example: turn "U+d83d" (6 chars) into u"\ud83d" (1 char) + if "key" in e and e["key"].startswith("U+"): + key = e["key"] + hex_suffix = key[key.index("+") + 1 :] + e["key"] = chr(int(hex_suffix, 16)) + + # WebKit sets code as 'Unidentified' for unidentified key codes, but + # tests expect ''. + if "code" in e and e["code"] == "Unidentified": + e["code"] = "" + return events + + +async def get_keys_value(bidi_session, context): + keys_value = await bidi_session.script.evaluate( + expression="""document.getElementById("keys").value""", + target=ContextTarget(context), + await_promise=False, + ) + + return keys_value["value"] diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/conftest.py b/testing/web-platform/tests/webdriver/tests/bidi/input/conftest.py new file mode 100644 index 0000000000..71a1152729 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/conftest.py @@ -0,0 +1,46 @@ +import pytest +import pytest_asyncio + +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.keys import Keys + +@pytest.fixture +def get_focused_key_input(bidi_session, top_context): + """Get focused input element, containing pressed key data.""" + + async def get_focused_key_input(context=top_context): + return await bidi_session.script.call_function( + function_declaration="""() => { + const elem = document.getElementById("keys"); + elem.focus(); + return elem; + }""", + target=ContextTarget(context["context"]), + await_promise=False, + ) + + return get_focused_key_input + + +@pytest_asyncio.fixture(autouse=True) +async def release_actions(bidi_session, top_context): + # release all actions after each test + yield + await bidi_session.input.release_actions(context=top_context["context"]) + + +@pytest_asyncio.fixture +async def setup_key_test(load_static_test_page, get_focused_key_input): + await load_static_test_page(page="test_actions.html") + await get_focused_key_input() + + +@pytest_asyncio.fixture +async def setup_wheel_test(bidi_session, top_context, load_static_test_page): + await load_static_test_page(page="test_actions_scroll.html") + await bidi_session.script.evaluate( + expression="document.scrollingElement.scrollTop = 0", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/__init__.py new file mode 100644 index 0000000000..27bf9543b8 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/__init__.py @@ -0,0 +1,64 @@ +from webdriver.bidi.modules.script import ContextTarget + + +def remote_mapping_to_dict(js_object): + obj = {} + for key, value in js_object: + obj[key] = value["value"] + + return obj + + +async def get_inview_center_bidi(bidi_session, context, element): + elem_rect = await get_element_rect(bidi_session, context=context, element=element) + viewport_rect = await get_viewport_rect(bidi_session, context=context) + + x = { + "left": max(0, min(elem_rect["x"], elem_rect["x"] + elem_rect["width"])), + "right": min( + viewport_rect["width"], + 
max(elem_rect["x"], elem_rect["x"] + elem_rect["width"]), + ), + } + + y = { + "top": max(0, min(elem_rect["y"], elem_rect["y"] + elem_rect["height"])), + "bottom": min( + viewport_rect["height"], + max(elem_rect["y"], elem_rect["y"] + elem_rect["height"]), + ), + } + + return { + "x": (x["left"] + x["right"]) / 2, + "y": (y["top"] + y["bottom"]) / 2, + } + + +async def get_element_rect(bidi_session, context, element): + result = await bidi_session.script.call_function( + function_declaration=""" +el => el.getBoundingClientRect().toJSON() +""", + arguments=[element], + target=ContextTarget(context["context"]), + await_promise=False, + ) + + return remote_mapping_to_dict(result["value"]) + + +async def get_viewport_rect(bidi_session, context): + expression = """ + ({ + height: window.innerHeight || document.documentElement.clientHeight, + width: window.innerWidth || document.documentElement.clientWidth, + }); + """ + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(context["context"]), + await_promise=False, + ) + + return remote_mapping_to_dict(result["value"]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/invalid.py new file mode 100644 index 0000000000..2fb45cd832 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/invalid.py @@ -0,0 +1,231 @@ +import pytest + +from webdriver.bidi.modules.input import Actions, get_element_origin +from webdriver.bidi.error import ( + InvalidArgumentException, + MoveTargetOutOfBoundsException, + NoSuchElementException, + NoSuchFrameException, + NoSuchNodeException, +) +from webdriver.bidi.modules.script import ContextTarget + + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", [None, True, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, value): + actions = Actions() + actions.add_key() + with pytest.raises(InvalidArgumentException): + await bidi_session.input.perform_actions(actions=actions, context=value) + + +async def test_params_contexts_value_invalid_value(bidi_session): + actions = Actions() + actions.add_key() + with pytest.raises(NoSuchFrameException): + await bidi_session.input.perform_actions(actions=actions, context="foo") + + +@pytest.mark.parametrize( + "value", + [("fa"), ("\u0BA8\u0BBFb"), ("\u0BA8\u0BBF\u0BA8"), ("\u1100\u1161\u11A8c")], +) +async def test_params_actions_invalid_value_multiple_codepoints( + bidi_session, top_context, setup_key_test, value +): + actions = Actions() + actions.add_key().key_down(value).key_up(value) + with pytest.raises(InvalidArgumentException): + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + +@pytest.mark.parametrize("missing", ["x", "y"]) +async def test_params_actions_missing_coordinates(bidi_session, top_context, missing): + actions = Actions() + pointer_input_source = actions.add_pointer().pointer_move(x=0, y=0) + + json_actions = actions.to_json() + pointer_input_source_json = json_actions[-1]["actions"] + del pointer_input_source_json[-1][missing] + + with pytest.raises(InvalidArgumentException): + await bidi_session.input.perform_actions( + actions=json_actions, context=top_context["context"] + ) + + +@pytest.mark.parametrize("missing", ["x", "y", "deltaX", "deltaY"]) +async def test_params_actions_missing_wheel_property( + bidi_session, top_context, missing +): + actions = Actions() + wheel_input_source 
= actions.add_wheel().scroll(x=0, y=0, delta_x=5, delta_y=10) + + json_actions = actions.to_json() + wheel_input_actions_json = json_actions[-1]["actions"] + del wheel_input_actions_json[-1][missing] + + with pytest.raises(InvalidArgumentException): + await bidi_session.input.perform_actions( + actions=json_actions, context=top_context["context"] + ) + + +async def test_params_actions_origin_element_outside_viewport( + bidi_session, top_context, get_actions_origin_page, get_element +): + url = get_actions_origin_page( + """width: 100px; height: 50px; background: green; + position: relative; left: -200px; top: -100px;""" + ) + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=url, + wait="complete", + ) + + elem = await get_element("#inner") + + actions = Actions() + actions.add_pointer().pointer_move(x=0, y=0, origin=get_element_origin(elem)) + with pytest.raises(MoveTargetOutOfBoundsException): + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + +@pytest.mark.parametrize("value", [True, 42, []]) +async def test_params_actions_origin_invalid_type(bidi_session, top_context, value): + actions = Actions() + actions.add_pointer().pointer_move(x=0, y=0, origin=value) + with pytest.raises(InvalidArgumentException): + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + +@pytest.mark.parametrize("value", [None, True, 42, {}, [], "foo"]) +async def test_params_actions_origin_invalid_value_type( + bidi_session, top_context, get_actions_origin_page, get_element, value +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=get_actions_origin_page(""), + wait="complete", + ) + + elem = await get_element("#inner") + actions = Actions() + actions.add_pointer().pointer_move( + x=0, y=0, origin={"type": value, "element": {"sharedId": elem["sharedId"]}} + ) + with pytest.raises(InvalidArgumentException): + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + +@pytest.mark.parametrize("value", [None, True, 42, {}, [], "foo"]) +async def test_params_actions_origin_invalid_value_element( + bidi_session, top_context, value +): + actions = Actions() + actions.add_pointer().pointer_move( + x=0, y=0, origin={"type": "element", "element": value} + ) + with pytest.raises(InvalidArgumentException): + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + +async def test_params_actions_origin_invalid_value_serialized_element( + bidi_session, top_context, get_actions_origin_page, get_element +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=get_actions_origin_page(""), + wait="complete", + ) + + elem = await get_element("#inner") + + actions = Actions() + actions.add_pointer().pointer_move(x=0, y=0, origin=elem) + with pytest.raises(InvalidArgumentException): + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + +@pytest.mark.parametrize( + "expression", + [ + "document.querySelector('input#button').attributes[0]", + "document.querySelector('#with-text-node').childNodes[0]", + """document.createProcessingInstruction("xml-stylesheet", "href='foo.css'")""", + "document.querySelector('#with-comment').childNodes[0]", + "document", + "document.doctype", + "document.createDocumentFragment()", + "document.querySelector('#custom-element').shadowRoot", + ], + ids=[ + "attribute", + 
"text node", + "processing instruction", + "comment", + "document", + "doctype", + "document fragment", + "shadow root", + ] +) +async def test_params_actions_origin_no_such_element( + bidi_session, top_context, get_test_page, expression +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=get_test_page(), + wait="complete", + ) + + node = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + actions = Actions() + actions.add_pointer().pointer_move(x=0, y=0, origin=get_element_origin(node)) + with pytest.raises(NoSuchElementException): + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + +async def test_params_actions_origin_no_such_node(bidi_session, top_context): + actions = Actions() + actions.add_pointer().pointer_move( + x=0, y=0, origin={"type": "element", "element": {"sharedId": "foo"}} + ) + with pytest.raises(NoSuchNodeException): + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + +@pytest.mark.parametrize("origin", ["viewport", "pointer"]) +async def test_params_actions_origin_outside_viewport( + bidi_session, top_context, origin +): + actions = Actions() + actions.add_pointer().pointer_move(x=-50, y=-50, origin=origin) + with pytest.raises(MoveTargetOutOfBoundsException): + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key.py new file mode 100644 index 0000000000..9189bc96cc --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key.py @@ -0,0 +1,54 @@ +import pytest + +from webdriver.bidi.modules.input import Actions + +from tests.support.keys import Keys +from .. import get_keys_value + +pytestmark = pytest.mark.asyncio + + +async def test_key_backspace(bidi_session, top_context, setup_key_test): + actions = Actions() + actions.add_key().send_keys("efcd").send_keys([Keys.BACKSPACE, Keys.BACKSPACE]) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert keys_value == "ef" + + +@pytest.mark.parametrize( + "value", + [ + ("\U0001F604"), + ("\U0001F60D"), + ("\u0BA8\u0BBF"), + ("\u1100\u1161\u11A8"), + ], +) +async def test_key_codepoint( + bidi_session, top_context, setup_key_test, value +): + # Not using send_keys() because we always want to treat value as + # one character here. `len(value)` varies by platform for non-BMP characters, + # so we don't want to iterate over value. 
+ + actions = Actions() + (actions.add_key().key_down(value).key_up(value)) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + # events sent by major browsers are inconsistent so only check key value + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert keys_value == value + + +async def test_null_response_value(bidi_session, top_context): + actions = Actions() + actions.add_key().key_down("a").key_up("a") + value = await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + assert value == {} diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key_events.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key_events.py new file mode 100644 index 0000000000..79cecce5ce --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key_events.py @@ -0,0 +1,271 @@ +# META: timeout=long +import copy +import pytest + +from collections import defaultdict + +from webdriver.bidi.modules.input import Actions +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.helpers import filter_dict, filter_supported_key_events +from tests.support.keys import ALL_EVENTS, Keys, ALTERNATIVE_KEY_NAMES +from .. import get_events, get_keys_value + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "key,event", + [ + (Keys.ESCAPE, "ESCAPE"), + (Keys.RIGHT, "RIGHT"), + ], +) +async def test_non_printable_key_sends_events( + bidi_session, top_context, setup_key_test, key, event +): + code = ALL_EVENTS[event]["code"] + value = ALL_EVENTS[event]["key"] + + actions = Actions() + (actions.add_key().key_down(key).key_up(key)) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + all_events = await get_events(bidi_session, top_context["context"]) + + expected = [ + {"code": code, "key": value, "type": "keydown"}, + {"code": code, "key": value, "type": "keypress"}, + {"code": code, "key": value, "type": "keyup"}, + ] + + # Make a copy for alternate key property values + # Note: only keydown and keyup are affected by alternate key names + alt_expected = copy.deepcopy(expected) + if event in ALTERNATIVE_KEY_NAMES: + alt_expected[0]["key"] = ALTERNATIVE_KEY_NAMES[event] + alt_expected[2]["key"] = ALTERNATIVE_KEY_NAMES[event] + + (_, expected) = filter_supported_key_events(all_events, expected) + (events, alt_expected) = filter_supported_key_events(all_events, alt_expected) + if len(events) == 2: + # most browsers don't send a keypress for non-printable keys + assert events == [expected[0], expected[2]] or events == [ + alt_expected[0], + alt_expected[2], + ] + else: + assert events == expected or events == alt_expected + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert len(keys_value) == 0 + + +@pytest.mark.parametrize( + "key, event", + [ + (Keys.ALT, "ALT"), + (Keys.CONTROL, "CONTROL"), + (Keys.META, "META"), + (Keys.SHIFT, "SHIFT"), + (Keys.R_ALT, "R_ALT"), + (Keys.R_CONTROL, "R_CONTROL"), + (Keys.R_META, "R_META"), + (Keys.R_SHIFT, "R_SHIFT"), + ], +) +async def test_key_modifier_key( + bidi_session, top_context, setup_key_test, key, event +): + code = ALL_EVENTS[event]["code"] + value = ALL_EVENTS[event]["key"] + + actions = Actions() + (actions.add_key().key_down(key).key_up(key)) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + all_events = await 
get_events(bidi_session, top_context["context"]) + + expected = [ + {"code": code, "key": value, "type": "keydown"}, + {"code": code, "key": value, "type": "keyup"}, + ] + + (events, expected) = filter_supported_key_events(all_events, expected) + assert events == expected + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert len(keys_value) == 0 + + +@pytest.mark.parametrize( + "value,code", + [ + ("a", "KeyA"), + ("a", "KeyA"), + ('"', "Quote"), + (",", "Comma"), + ("\u00E0", ""), + ("\u0416", ""), + ("@", "Digit2"), + ("\u2603", ""), + ("\uF6C2", ""), # PUA + ], +) +async def test_key_printable_key( + bidi_session, + top_context, + setup_key_test, + value, + code, +): + actions = Actions() + (actions.add_key().key_down(value).key_up(value)) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + all_events = await get_events(bidi_session, top_context["context"]) + + expected = [ + {"code": code, "key": value, "type": "keydown"}, + {"code": code, "key": value, "type": "keypress"}, + {"code": code, "key": value, "type": "keyup"}, + ] + + (events, expected) = filter_supported_key_events(all_events, expected) + assert events == expected + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert keys_value == value + + +@pytest.mark.parametrize("use_keyup", [True, False]) +async def test_key_printable_sequence( + bidi_session, top_context, setup_key_test, use_keyup +): + actions = Actions() + key_source = actions.add_key() + if use_keyup: + actions.add_key().send_keys("ab") + else: + actions.add_key().key_down("a").key_down("b") + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + all_events = await get_events(bidi_session, top_context["context"]) + + expected = [ + {"code": "KeyA", "key": "a", "type": "keydown"}, + {"code": "KeyA", "key": "a", "type": "keypress"}, + {"code": "KeyA", "key": "a", "type": "keyup"}, + {"code": "KeyB", "key": "b", "type": "keydown"}, + {"code": "KeyB", "key": "b", "type": "keypress"}, + {"code": "KeyB", "key": "b", "type": "keyup"}, + ] + expected = [e for e in expected if use_keyup or e["type"] != "keyup"] + + (events, expected) = filter_supported_key_events(all_events, expected) + assert events == expected + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert keys_value == "ab" + + +@pytest.mark.parametrize("name,expected", ALL_EVENTS.items()) +async def test_key_special_key_sends_keydown( + bidi_session, + top_context, + setup_key_test, + name, + expected, +): + if name.startswith("F"): + # Prevent default behavior for F1, etc., but only after keydown + # bubbles up to body. (Otherwise activated browser menus/functions + # may interfere with subsequent tests.) 
+ await bidi_session.script.evaluate( + expression=""" + document.body.addEventListener("keydown", + function(e) { e.preventDefault() }); + """, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + actions = Actions() + (actions.add_key().key_down(getattr(Keys, name))) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + # only interested in keydown + all_events = await get_events(bidi_session, top_context["context"]) + first_event = all_events[0] + # make a copy so we can throw out irrelevant keys and compare to events + expected = dict(expected) + + del expected["value"] + + # make another copy for alternative key names + alt_expected = copy.deepcopy(expected) + if name in ALTERNATIVE_KEY_NAMES: + alt_expected["key"] = ALTERNATIVE_KEY_NAMES[name] + + # check and remove keys that aren't in expected + assert first_event["type"] == "keydown" + assert first_event["repeat"] is False + first_event = filter_dict(first_event, expected) + if first_event["code"] is None: + del first_event["code"] + del expected["code"] + del alt_expected["code"] + assert first_event == expected or first_event == alt_expected + # only printable characters should be recorded in input field + keys_value = await get_keys_value(bidi_session, top_context["context"]) + if len(expected["key"]) == 1: + assert keys_value == expected["key"] + else: + assert len(keys_value) == 0 + + +async def test_key_space(bidi_session, top_context, setup_key_test): + actions = Actions() + ( + actions.add_key() + .key_down(Keys.SPACE) + .key_up(Keys.SPACE) + .key_down(" ") + .key_up(" ") + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + all_events = await get_events(bidi_session, top_context["context"]) + + by_type = defaultdict(list) + for event in all_events: + by_type[event["type"]].append(event) + + for event_type in by_type: + events = by_type[event_type] + assert len(events) == 2 + assert events[0] == events[1] + + +async def test_keyup_only_sends_no_events(bidi_session, top_context, setup_key_test): + actions = Actions() + actions.add_key().key_up("a") + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + events = await get_events(bidi_session, top_context["context"]) + assert len(events) == 0 + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert len(keys_value) == 0 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key_modifier.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key_modifier.py new file mode 100644 index 0000000000..e319bb70aa --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/key_modifier.py @@ -0,0 +1,163 @@ +import pytest + +from webdriver.bidi.modules.input import Actions + +from tests.support.keys import Keys +from .. 
import get_keys_value + +pytestmark = pytest.mark.asyncio + + +async def test_meta_or_ctrl_with_printable_and_backspace_deletes_all_text( + bidi_session, top_context, setup_key_test, modifier_key +): + actions = Actions() + ( + actions.add_key() + .send_keys("abc d") + .key_down(modifier_key) + .key_down("a") + .key_up(modifier_key) + .key_up("a") + .key_down(Keys.BACKSPACE) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert keys_value == "" + + +async def test_meta_or_ctrl_with_printable_cut_and_paste_text( + bidi_session, top_context, setup_key_test, modifier_key +): + initial = "abc d" + actions = Actions() + ( + actions.add_key() + .send_keys(initial) + .key_down(modifier_key) + .key_down("a") + .key_up(modifier_key) + .key_up("a") + .key_down(modifier_key) + .key_down("x") + .key_up(modifier_key) + .key_up("x") + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert keys_value == "" + + actions = Actions() + ( + actions.add_key() + .key_down(modifier_key) + .key_down("v") + .key_up(modifier_key) + .key_up("v") + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert keys_value == initial + + +async def test_meta_or_ctrl_with_printable_copy_and_paste_text( + bidi_session, top_context, setup_key_test, modifier_key +): + initial = "abc d" + actions = Actions() + ( + actions.add_key() + .send_keys(initial) + .key_down(modifier_key) + .key_down("a") + .key_up(modifier_key) + .key_up("a") + .key_down(modifier_key) + .key_down("c") + .key_up(modifier_key) + .key_up("c") + .send_keys([Keys.RIGHT]) + .key_down(modifier_key) + .key_down("v") + .key_up(modifier_key) + .key_up("v") + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + assert keys_value == initial * 2 + + +@pytest.mark.parametrize("modifier", [Keys.SHIFT, Keys.R_SHIFT]) +async def test_key_modifier_shift_non_printable_keys( + bidi_session, top_context, setup_key_test, modifier +): + actions = Actions() + ( + actions.add_key() + .key_down("f") + .key_up("f") + .key_down("o") + .key_up("o") + .key_down("o") + .key_up("o") + .key_down(modifier) + .key_down(Keys.BACKSPACE) + .key_up(modifier) + .key_up(Keys.BACKSPACE) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + + assert keys_value == "fo" + + +@pytest.mark.parametrize("modifier", [Keys.SHIFT, Keys.R_SHIFT]) +async def test_key_modifier_shift_printable_keys( + bidi_session, top_context, setup_key_test, modifier +): + actions = Actions() + ( + actions.add_key() + .key_down("b") + .key_up("b") + .key_down(modifier) + .key_down("c") + .key_up(modifier) + .key_up("c") + .key_down("d") + .key_up("d") + .key_down(modifier) + .key_down("e") + .key_up("e") + .key_down("f") + .key_up(modifier) + .key_up("f") + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + keys_value = await get_keys_value(bidi_session, top_context["context"]) + + assert keys_value == "bCdEF" diff --git 
a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse.py new file mode 100644 index 0000000000..7e26258101 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse.py @@ -0,0 +1,254 @@ +import pytest + +from webdriver.bidi.modules.input import Actions, get_element_origin + +from tests.support.asserts import assert_move_to_coordinates +from tests.support.helpers import filter_dict + +from .. import get_events +from . import get_element_rect, get_inview_center_bidi + +pytestmark = pytest.mark.asyncio + + +async def test_click_at_coordinates(bidi_session, top_context, load_static_test_page): + await load_static_test_page(page="test_actions.html") + + div_point = { + "x": 82, + "y": 187, + } + actions = Actions() + ( + actions.add_pointer() + .pointer_move(x=div_point["x"], y=div_point["y"], duration=1000) + .pointer_down(button=0) + .pointer_up(button=0) + ) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + events = await get_events(bidi_session, top_context["context"]) + + assert len(events) == 4 + assert_move_to_coordinates(div_point, "outer", events) + + for e in events: + if e["type"] != "mousedown": + assert e["buttons"] == 0 + assert e["button"] == 0 + + expected = [ + {"type": "mousedown", "buttons": 1}, + {"type": "mouseup", "buttons": 0}, + {"type": "click", "buttons": 0}, + ] + filtered_events = [filter_dict(e, expected[0]) for e in events] + assert expected == filtered_events[1:] + + +async def test_context_menu_at_coordinates( + bidi_session, top_context, load_static_test_page +): + await load_static_test_page(page="test_actions.html") + + div_point = { + "x": 82, + "y": 187, + } + + actions = Actions() + ( + actions.add_pointer() + .pointer_move(x=div_point["x"], y=div_point["y"]) + .pointer_down(button=2) + .pointer_up(button=2) + ) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + events = await get_events(bidi_session, top_context["context"]) + assert len(events) == 4 + + expected = [ + {"type": "mousedown", "button": 2}, + {"type": "contextmenu", "button": 2}, + ] + filtered_events = [filter_dict(e, expected[0]) for e in events] + mousedown_contextmenu_events = [ + x for x in filtered_events if x["type"] in ["mousedown", "contextmenu"] + ] + assert expected == mousedown_contextmenu_events + + +async def test_click_element_center( + bidi_session, top_context, get_element, load_static_test_page +): + await load_static_test_page(page="test_actions.html") + + outer = await get_element("#outer") + center = await get_inview_center_bidi( + bidi_session, context=top_context, element=outer + ) + + actions = Actions() + ( + actions.add_pointer() + .pointer_move(x=0, y=0, origin=get_element_origin(outer)) + .pointer_down(button=0) + .pointer_up(button=0) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + events = await get_events(bidi_session, top_context["context"]) + assert len(events) == 4 + + event_types = [e["type"] for e in events] + assert ["mousemove", "mousedown", "mouseup", "click"] == event_types + for e in events: + if e["type"] != "mousemove": + assert e["pageX"] == pytest.approx(center["x"], abs=1.0) + assert e["pageY"] == pytest.approx(center["y"], abs=1.0) + assert e["target"] == "outer" + + +async def test_click_navigation( + bidi_session, + 
top_context, + url, + inline, + subscribe_events, + wait_for_event, + get_element, +): + await subscribe_events(events=["browsingContext.load"]) + + destination = url("/webdriver/tests/support/html/test_actions.html") + start = inline(f'<a href="{destination}" id="link">destination</a>') + + async def click_link(): + link = await get_element("#link") + + actions = Actions() + ( + actions.add_pointer() + .pointer_move(x=0, y=0, origin=get_element_origin(link)) + .pointer_down(button=0) + .pointer_up(button=0) + ) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + # repeat steps to check behaviour after document unload + for _ in range(2): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=start, wait="complete" + ) + + on_entry = wait_for_event("browsingContext.load") + await click_link() + event = await on_entry + assert event["url"] == destination + + +@pytest.mark.parametrize("drag_duration", [0, 300, 800]) +@pytest.mark.parametrize( + "dx, dy", [(20, 0), (0, 15), (10, 15), (-20, 0), (10, -15), (-10, -15)] +) +async def test_drag_and_drop( + bidi_session, + top_context, + get_element, + load_static_test_page, + dx, + dy, + drag_duration, +): + await load_static_test_page(page="test_actions.html") + + drag_target = await get_element("#dragTarget") + initial_rect = await get_element_rect( + bidi_session, context=top_context, element=drag_target + ) + initial_center = await get_inview_center_bidi( + bidi_session, context=top_context, element=drag_target + ) + + # Conclude chain with extra move to allow time for last queued + # coordinate-update of drag_target and to test that drag_target is "dropped". + actions = Actions() + ( + actions.add_pointer() + .pointer_move(x=0, y=0, origin=get_element_origin(drag_target)) + .pointer_down(button=0) + .pointer_move(dx, dy, duration=drag_duration, origin="pointer") + .pointer_up(button=0) + .pointer_move(80, 50, duration=100, origin="pointer") + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + # mouseup that ends the drag is at the expected destination + events = await get_events(bidi_session, top_context["context"]) + e = events[1] + assert e["type"] == "mouseup" + assert e["pageX"] == pytest.approx(initial_center["x"] + dx, abs=1.0) + assert e["pageY"] == pytest.approx(initial_center["y"] + dy, abs=1.0) + # check resulting location of the dragged element + final_rect = await get_element_rect( + bidi_session, context=top_context, element=drag_target + ) + assert initial_rect["x"] + dx == final_rect["x"] + assert initial_rect["y"] + dy == final_rect["y"] + + +@pytest.mark.parametrize("drag_duration", [0, 300, 800]) +async def test_drag_and_drop_with_draggable_element( + bidi_session, top_context, get_element, load_static_test_page, drag_duration +): + new_context = await bidi_session.browsing_context.create(type_hint="window") + await load_static_test_page(page="test_actions.html", context=new_context) + drag_target = await get_element("#draggable") + drop_target = await get_element("#droppable") + + # Conclude chain with extra move to allow time for last queued + # coordinate-update of drag_target and to test that drag_target is "dropped". 
+ actions = Actions() + ( + actions.add_pointer() + .pointer_move(x=0, y=0, origin=get_element_origin(drag_target)) + .pointer_down(button=0) + .pointer_move( + x=50, y=25, duration=drag_duration, origin=get_element_origin(drop_target) + ) + .pointer_up(button=0) + .pointer_move(80, 50, duration=100, origin="pointer") + ) + + await bidi_session.input.perform_actions( + actions=actions, context=new_context["context"] + ) + + # mouseup that ends the drag is at the expected destination + events = await get_events(bidi_session, new_context["context"]) + + assert len(events) >= 5 + assert events[1]["type"] == "dragstart", f"Events captured were {events}" + assert events[2]["type"] == "dragover", f"Events captured were {events}" + drag_events_captured = [ + ev["type"] + for ev in events + if ev["type"].startswith("drag") or ev["type"].startswith("drop") + ] + assert "dragend" in drag_events_captured + assert "dragenter" in drag_events_captured + assert "dragleave" in drag_events_captured + assert "drop" in drag_events_captured diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse_modifier.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse_modifier.py new file mode 100644 index 0000000000..ea50951f37 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse_modifier.py @@ -0,0 +1,242 @@ +import pytest + +from webdriver.bidi.modules.input import Actions, get_element_origin +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.helpers import filter_dict +from tests.support.keys import Keys + +from .. import get_events + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "modifier, prop", + [ + (Keys.CONTROL, "ctrlKey"), + (Keys.R_CONTROL, "ctrlKey"), + ], +) +async def test_control_click( + bidi_session, + current_session, + top_context, + get_element, + load_static_test_page, + modifier, + prop, +): + os = current_session.capabilities["platformName"] + + await load_static_test_page(page="test_actions.html") + outer = await get_element("#outer") + + actions = Actions() + key_sources = ( + actions.add_key() + .pause(duration=0) + .key_down(modifier) + .pause(duration=200) + .key_up(modifier) + ) + mouse_sources = ( + actions.add_pointer() + .pointer_move(x=0, y=0, origin=get_element_origin(outer)) + .pointer_down(button=0) + .pointer_up(button=0) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + if os != "mac": + expected = [ + {"type": "mousemove"}, + {"type": "mousedown"}, + {"type": "mouseup"}, + {"type": "click"}, + ] + else: + expected = [ + {"type": "mousemove"}, + {"type": "mousedown"}, + {"type": "contextmenu"}, + {"type": "mouseup"}, + ] + + defaults = {"altKey": False, "metaKey": False, "shiftKey": False, "ctrlKey": False} + + for e in expected: + e.update(defaults) + if e["type"] != "mousemove": + e[prop] = True + + all_events = await get_events(bidi_session, top_context["context"]) + filtered_events = [filter_dict(e, expected[0]) for e in all_events] + assert expected == filtered_events + + +async def test_control_click_release( + bidi_session, top_context, load_static_test_page, get_focused_key_input +): + await load_static_test_page(page="test_actions.html") + key_reporter = await get_focused_key_input() + + # The context menu stays visible during subsequent tests so let's not + # display it in the first place. 
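+ # (On macOS, a Ctrl + primary-button click is reported as a contextmenu event,
+ # as the expected events in test_control_click above assume, so suppress the menu.)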
+ await bidi_session.script.evaluate( + expression=""" + var keyReporter = document.getElementById("keys"); + document.addEventListener("contextmenu", function(e) { + e.preventDefault(); + }); + """, + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + actions = Actions() + key_sources = actions.add_key().pause(duration=0).key_down(Keys.CONTROL) + mouse_sources = ( + actions.add_pointer() + .pointer_move(x=0, y=0, origin=get_element_origin(key_reporter)) + .pointer_down(button=0) + ) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + await bidi_session.script.evaluate( + expression=""" + var keyReporter = document.getElementById("keys"); + keyReporter.addEventListener("mousedown", recordPointerEvent); + keyReporter.addEventListener("mouseup", recordPointerEvent); + resetEvents(); + """, + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + await bidi_session.input.release_actions(context=top_context["context"]) + + expected = [ + {"type": "mouseup"}, + {"type": "keyup"}, + ] + all_events = await get_events(bidi_session, top_context["context"]) + events = [filter_dict(e, expected[0]) for e in all_events] + assert events == expected + + +async def test_many_modifiers_click( + bidi_session, top_context, get_element, load_static_test_page +): + await load_static_test_page(page="test_actions.html") + outer = await get_element("#outer") + + dblclick_timeout = 800 + actions = Actions() + key_sources = ( + actions.add_key() + .pause(duration=0) + .key_down(Keys.ALT) + .key_down(Keys.SHIFT) + .pause(duration=dblclick_timeout) + .key_up(Keys.ALT) + .key_up(Keys.SHIFT) + ) + mouse_sources = ( + actions.add_pointer() + .pointer_move(x=0, y=0, origin=get_element_origin(outer)) + .pause(duration=0) + .pointer_down(button=0) + .pointer_up(button=0) + .pause(duration=0) + .pause(duration=0) + .pointer_down(button=0) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + expected = [ + {"type": "mousemove"}, + # shift and alt pressed + {"type": "mousedown"}, + {"type": "mouseup"}, + {"type": "click"}, + # no modifiers pressed + {"type": "mousedown"}, + ] + + defaults = {"altKey": False, "metaKey": False, "shiftKey": False, "ctrlKey": False} + + for e in expected: + e.update(defaults) + + for e in expected[1:4]: + e["shiftKey"] = True + e["altKey"] = True + + all_events = await get_events(bidi_session, top_context["context"]) + events = [filter_dict(e, expected[0]) for e in all_events] + assert events == expected + + +@pytest.mark.parametrize( + "modifier, prop", + [ + (Keys.ALT, "altKey"), + (Keys.R_ALT, "altKey"), + (Keys.META, "metaKey"), + (Keys.R_META, "metaKey"), + (Keys.SHIFT, "shiftKey"), + (Keys.R_SHIFT, "shiftKey"), + ], +) +async def test_modifier_click( + bidi_session, top_context, get_element, load_static_test_page, modifier, prop +): + await load_static_test_page(page="test_actions.html") + outer = await get_element("#outer") + + actions = Actions() + key_sources = ( + actions.add_key() + .pause(duration=200) + .key_down(modifier) + .pause(duration=200) + .pause(duration=0) + .key_up(modifier) + ) + mouse_sources = ( + actions.add_pointer() + .pointer_move(x=0, y=0, origin=get_element_origin(outer)) + .pause(duration=50) + .pointer_down(button=0) + .pointer_up(button=0) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + expected = [ + {"type": "mousemove"}, + {"type": "mousedown"}, + 
{"type": "mouseup"}, + {"type": "click"}, + ] + + defaults = {"altKey": False, "metaKey": False, "shiftKey": False, "ctrlKey": False} + + for e in expected: + e.update(defaults) + if e["type"] != "mousemove": + e[prop] = True + + all_events = await get_events(bidi_session, top_context["context"]) + filtered_events = [filter_dict(e, expected[0]) for e in all_events] + assert expected == filtered_events diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse_multiclick.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse_multiclick.py new file mode 100644 index 0000000000..f74650c2f3 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_mouse_multiclick.py @@ -0,0 +1,125 @@ +import pytest + +from webdriver.bidi.modules.input import Actions, get_element_origin +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.asserts import assert_move_to_coordinates +from tests.support.helpers import filter_dict + +from .. import get_events +from . import get_element_rect + +pytestmark = pytest.mark.asyncio + + +_DBLCLICK_INTERVAL = 640 + + +@pytest.mark.parametrize("pause_during_click", [True, False]) +@pytest.mark.parametrize("click_pause", [0, 200, _DBLCLICK_INTERVAL + 10]) +async def test_dblclick_at_coordinates( + bidi_session, top_context, load_static_test_page, pause_during_click, click_pause +): + await load_static_test_page(page="test_actions.html") + + div_point = { + "x": 82, + "y": 187, + } + actions = Actions() + input_source = ( + actions.add_pointer() + .pointer_move(x=div_point["x"], y=div_point["y"]) + .pointer_down(button=0) + .pointer_up(button=0) + ) + + # Either pause before the second click, which might prevent the double click + # depending on the pause delay. Or between mousedown and mouseup for the + # second click, which will never prevent a double click. + if pause_during_click: + input_source.pointer_down(button=0).pause(duration=click_pause) + else: + input_source.pause(duration=click_pause).pointer_down(button=0) + + input_source.pointer_up(button=0) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + # mouseup that ends the drag is at the expected destination + events = await get_events(bidi_session, top_context["context"]) + + assert_move_to_coordinates(div_point, "outer", events) + + expected = [ + {"type": "mousedown", "button": 0}, + {"type": "mouseup", "button": 0}, + {"type": "click", "button": 0}, + {"type": "mousedown", "button": 0}, + {"type": "mouseup", "button": 0}, + {"type": "click", "button": 0}, + ] + + if pause_during_click or click_pause < _DBLCLICK_INTERVAL: + expected.append({"type": "dblclick", "button": 0}) + + filtered_events = [filter_dict(e, expected[0]) for e in events] + assert expected == filtered_events[1:] + + +lots_of_text = ( + "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor " + "incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud " + "exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat." +) + + +async def test_tripleclick_at_coordinates( + bidi_session, top_context, inline, get_element +): + """ + This test does a triple click on a coordinate. On desktop platforms + this will select a paragraph. On mobile this will not have the same + desired outcome as taps are handled differently on mobile. + """ + url = inline( + f"""
<div> + {lots_of_text} + </div>
""" + ) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + div = await get_element("div") + div_rect = await get_element_rect(bidi_session, context=top_context, element=div) + div_centre = { + "x": div_rect["x"] + div_rect["width"] / 2, + "y": div_rect["y"] + div_rect["height"] / 2, + } + + actions = Actions() + ( + actions.add_pointer() + .pointer_move(x=int(div_centre["x"]), y=int(div_centre["y"])) + .pointer_down(button=0) + .pointer_up(button=0) + .pointer_down(button=0) + .pointer_up(button=0) + .pointer_down(button=0) + .pointer_up(button=0) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + actual_text = await bidi_session.script.evaluate( + expression="document.getSelection().toString()", + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + assert actual_text["value"] == lots_of_text diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_origin.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_origin.py new file mode 100644 index 0000000000..7991eecbdb --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_origin.py @@ -0,0 +1,140 @@ +import pytest + +from webdriver.bidi.modules.input import Actions, get_element_origin +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.asserts import assert_move_to_coordinates +from tests.support.helpers import filter_dict + +from .. import get_events +from . import ( + get_element_rect, + get_inview_center_bidi, + get_viewport_rect, + remote_mapping_to_dict, +) + +pytestmark = pytest.mark.asyncio + + +async def get_click_coordinates(bidi_session, context): + """Helper to get recorded click coordinates on a page generated with the + actions_origins_doc fixture.""" + result = await bidi_session.script.evaluate( + expression="window.coords", + target=ContextTarget(context["context"]), + await_promise=False, + ) + return remote_mapping_to_dict(result["value"]) + + +async def test_viewport_inside(bidi_session, top_context, get_actions_origin_page): + point = {"x": 50, "y": 50} + + url = get_actions_origin_page("width: 100px; height: 50px; background: green;") + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=url, + wait="complete", + ) + + actions = Actions() + actions.add_pointer().pointer_move(x=point["x"], y=point["y"]) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + click_coords = await get_click_coordinates(bidi_session, context=top_context) + assert click_coords["x"] == pytest.approx(point["x"], abs=1.0) + assert click_coords["y"] == pytest.approx(point["y"], abs=1.0) + + +async def test_pointer_inside(bidi_session, top_context, get_actions_origin_page): + start_point = {"x": 50, "y": 50} + offset = {"x": 10, "y": 5} + + url = get_actions_origin_page("width: 100px; height: 50px; background: green;") + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=url, + wait="complete", + ) + + actions = Actions() + ( + actions.add_pointer() + .pointer_move(x=start_point["x"], y=start_point["y"]) + .pointer_move(x=offset["x"], y=offset["y"], origin="pointer") + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + click_coords = await get_click_coordinates(bidi_session, context=top_context) + assert 
click_coords["x"] == pytest.approx(start_point["x"] + offset["x"], abs=1.0) + assert click_coords["y"] == pytest.approx(start_point["y"] + offset["y"], abs=1.0) + + +@pytest.mark.parametrize( + "doc", + [ + "width: 100px; height: 50px; background: green;", + """width: 100px; height: 50px; background: green; + position: relative; left: -50px; top: -25px;""", + ], + ids=["element fully visible", "element partly visible"], +) +@pytest.mark.parametrize("offset_x, offset_y", [(10, 15), (0, 0)]) +async def test_element_center_point_with_offset( + bidi_session, + top_context, + get_actions_origin_page, + get_element, + doc, + offset_x, + offset_y, +): + url = get_actions_origin_page(doc) + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=url, + wait="complete", + ) + + elem = await get_element("#inner") + center = await get_inview_center_bidi(bidi_session, context=top_context, element=elem) + + actions = Actions() + actions.add_pointer().pointer_move(x=offset_x, y=offset_y, origin=get_element_origin(elem)) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + click_coords = await get_click_coordinates(bidi_session, context=top_context) + assert click_coords["x"] == pytest.approx(center["x"] + offset_x, abs=1.0) + assert click_coords["y"] == pytest.approx(center["y"] + offset_y, abs=1.0) + + +async def test_element_larger_than_viewport( + bidi_session, top_context, get_actions_origin_page, get_element +): + url = get_actions_origin_page("width: 300vw; height: 300vh; background: green;") + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=url, + wait="complete", + ) + + elem = await get_element("#inner") + center = await get_inview_center_bidi(bidi_session, context=top_context, element=elem) + + actions = Actions() + actions.add_pointer().pointer_move(x=0, y=0, origin=get_element_origin(elem)) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + click_coords = await get_click_coordinates(bidi_session, context=top_context) + assert click_coords["x"] == pytest.approx(center["x"], abs=1.0) + assert click_coords["y"] == pytest.approx(center["y"], abs=1.0) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_pen.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_pen.py new file mode 100644 index 0000000000..30728f9a13 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_pen.py @@ -0,0 +1,73 @@ +import pytest + +from webdriver.bidi.modules.input import Actions, get_element_origin + +from .. import get_events +from . 
import get_inview_center_bidi + +pytestmark = pytest.mark.asyncio + + +async def test_pen_pointer_properties( + bidi_session, top_context, get_element, load_static_test_page +): + await load_static_test_page(page="test_actions_pointer.html") + + pointerArea = await get_element("#pointerArea") + center = await get_inview_center_bidi( + bidi_session, context=top_context, element=pointerArea + ) + + actions = Actions() + ( + actions.add_pointer(pointer_type="pen") + .pointer_move(x=0, y=0, origin=get_element_origin(pointerArea)) + .pointer_down(button=0, pressure=0.36, tilt_x=-72, tilt_y=9, twist=86) + .pointer_move(x=10, y=10, origin=get_element_origin(pointerArea)) + .pointer_up(button=0) + .pointer_move(x=80, y=50, origin=get_element_origin(pointerArea)) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + events = await get_events(bidi_session, top_context["context"]) + assert len(events) == 10 + event_types = [e["type"] for e in events] + assert [ + "pointerover", + "pointerenter", + "pointermove", + "pointerdown", + "pointerover", + "pointerenter", + "pointermove", + "pointerup", + "pointerout", + "pointerleave", + ] == event_types + assert events[3]["type"] == "pointerdown" + assert events[3]["pageX"] == pytest.approx(center["x"], abs=1.0) + assert events[3]["pageY"] == pytest.approx(center["y"], abs=1.0) + assert events[3]["target"] == "pointerArea" + assert events[3]["pointerType"] == "pen" + # The default value of width and height for mouse and pen inputs is 1 + assert round(events[3]["width"], 2) == 1 + assert round(events[3]["height"], 2) == 1 + assert round(events[3]["pressure"], 2) == 0.36 + assert events[3]["tiltX"] == -72 + assert events[3]["tiltY"] == 9 + assert events[3]["twist"] == 86 + assert events[6]["type"] == "pointermove" + assert events[6]["pageX"] == pytest.approx(center["x"] + 10, abs=1.0) + assert events[6]["pageY"] == pytest.approx(center["y"] + 10, abs=1.0) + assert events[6]["target"] == "pointerArea" + assert events[6]["pointerType"] == "pen" + assert round(events[6]["width"], 2) == 1 + assert round(events[6]["height"], 2) == 1 + # The default value of pressure for all inputs is 0.5, other properties are 0 + assert round(events[6]["pressure"], 2) == 0.5 + assert events[6]["tiltX"] == 0 + assert events[6]["tiltY"] == 0 + assert events[6]["twist"] == 0 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_touch.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_touch.py new file mode 100644 index 0000000000..126e72c76e --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/pointer_touch.py @@ -0,0 +1,150 @@ +import pytest + +from webdriver.bidi.modules.input import Actions, get_element_origin + +from .. import get_events +from . 
import get_inview_center_bidi + +pytestmark = pytest.mark.asyncio + + +async def test_touch_pointer_properties( + bidi_session, top_context, get_element, load_static_test_page +): + await load_static_test_page(page="test_actions_pointer.html") + + pointerArea = await get_element("#pointerArea") + center = await get_inview_center_bidi( + bidi_session, context=top_context, element=pointerArea + ) + + actions = Actions() + ( + actions.add_pointer(pointer_type="touch") + .pointer_move(x=0, y=0, origin=get_element_origin(pointerArea)) + .pointer_down( + button=0, + width=23, + height=31, + pressure=0.78, + tilt_x=21, + tilt_y=-8, + twist=355, + ) + .pointer_move( + x=10, + y=10, + origin=get_element_origin(pointerArea), + width=39, + height=35, + pressure=0.91, + tilt_x=-19, + tilt_y=62, + twist=345, + ) + .pointer_up(button=0) + .pointer_move(x=80, y=50, origin=get_element_origin(pointerArea)) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + events = await get_events(bidi_session, top_context["context"]) + + assert len(events) == 7 + event_types = [e["type"] for e in events] + assert [ + "pointerover", + "pointerenter", + "pointerdown", + "pointermove", + "pointerup", + "pointerout", + "pointerleave", + ] == event_types + assert events[2]["type"] == "pointerdown" + assert events[2]["pageX"] == pytest.approx(center["x"], abs=1.0) + assert events[2]["pageY"] == pytest.approx(center["y"], abs=1.0) + assert events[2]["target"] == "pointerArea" + assert events[2]["pointerType"] == "touch" + assert round(events[2]["width"], 2) == 23 + assert round(events[2]["height"], 2) == 31 + assert round(events[2]["pressure"], 2) == 0.78 + assert events[3]["type"] == "pointermove" + assert events[3]["pageX"] == pytest.approx(center["x"] + 10, abs=1.0) + assert events[3]["pageY"] == pytest.approx(center["y"] + 10, abs=1.0) + assert events[3]["target"] == "pointerArea" + assert events[3]["pointerType"] == "touch" + assert round(events[3]["width"], 2) == 39 + assert round(events[3]["height"], 2) == 35 + assert round(events[3]["pressure"], 2) == 0.91 + + +async def test_touch_pointer_properties_tilt_twist( + bidi_session, top_context, get_element, load_static_test_page +): + # This test only covers the tilt/twist properties which are + # more specific to pen-type pointers, but which the spec allows + # for generic touch pointers. 
Separating this out gives better + coverage of the basic properties in test_touch_pointer_properties + await load_static_test_page(page="test_actions_pointer.html") + + pointerArea = await get_element("#pointerArea") + center = await get_inview_center_bidi( + bidi_session, context=top_context, element=pointerArea + ) + + actions = Actions() + ( + actions.add_pointer(pointer_type="touch") + .pointer_move(x=0, y=0, origin=get_element_origin(pointerArea)) + .pointer_down( + button=0, + width=23, + height=31, + pressure=0.78, + tilt_x=21, + tilt_y=-8, + twist=355, + ) + .pointer_move( + x=10, + y=10, + origin=get_element_origin(pointerArea), + width=39, + height=35, + pressure=0.91, + tilt_x=-19, + tilt_y=62, + twist=345, + ) + .pointer_up(button=0) + .pointer_move(x=80, y=50, origin=get_element_origin(pointerArea)) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + events = await get_events(bidi_session, top_context["context"]) + + assert len(events) == 7 + event_types = [e["type"] for e in events] + assert [ + "pointerover", + "pointerenter", + "pointerdown", + "pointermove", + "pointerup", + "pointerout", + "pointerleave", + ] == event_types + assert events[2]["type"] == "pointerdown" + assert events[2]["tiltX"] == 21 + assert events[2]["tiltY"] == -8 + assert events[2]["twist"] == 355 + assert events[3]["type"] == "pointermove" + assert events[3]["tiltX"] == -19 + assert events[3]["tiltY"] == 62 + assert events[3]["twist"] == 345 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/wheel.py b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/wheel.py new file mode 100644 index 0000000000..cf96a9cd9b --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/perform_actions/wheel.py @@ -0,0 +1,81 @@ +import pytest + +from webdriver.bidi.modules.input import Actions, get_element_origin +from .. 
import get_events + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("delta_x, delta_y", [(0, 10), (5, 0), (5, 10)]) +async def test_wheel_scroll( + bidi_session, setup_wheel_test, top_context, get_element, delta_x, delta_y +): + actions = Actions() + + outer = await get_element("#outer") + actions.add_wheel().scroll( + x=0, y=0, delta_x=delta_x, delta_y=delta_y, origin=get_element_origin(outer) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + events = await get_events(bidi_session, top_context["context"]) + + assert len(events) == 1 + assert events[0]["type"] == "wheel" + assert events[0]["deltaX"] >= delta_x + assert events[0]["deltaY"] >= delta_y + assert events[0]["deltaZ"] == 0 + assert events[0]["target"] == "outer" + + +@pytest.mark.parametrize("delta_x, delta_y", [(0, 10), (5, 0), (5, 10)]) +async def test_wheel_scroll_iframe( + bidi_session, setup_wheel_test, top_context, get_element, delta_x, delta_y +): + actions = Actions() + + subframe = await get_element("#subframe") + actions.add_wheel().scroll( + x=0, y=0, delta_x=delta_x, delta_y=delta_y, origin=get_element_origin(subframe) + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + events = await get_events(bidi_session, top_context["context"]) + assert len(events) == 1 + assert events[0]["type"] == "wheel" + assert events[0]["deltaX"] >= delta_x + assert events[0]["deltaY"] >= delta_y + assert events[0]["deltaZ"] == 0 + assert events[0]["target"] == "iframeContent" + + +@pytest.mark.parametrize("delta_x, delta_y", [(0, 10), (5, 0), (5, 10)]) +async def test_wheel_scroll_overflow( + bidi_session, setup_wheel_test, top_context, get_element, delta_x, delta_y +): + actions = Actions() + + scrollable = await get_element("#scrollable") + + actions.add_wheel().scroll( + x=0, + y=0, + delta_x=delta_x, + delta_y=delta_y, + origin=get_element_origin(scrollable), + ) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + events = await get_events(bidi_session, top_context["context"]) + assert len(events) == 1 + assert events[0]["type"] == "wheel" + assert events[0]["deltaX"] >= delta_x + assert events[0]["deltaY"] >= delta_y + assert events[0]["deltaZ"] == 0 + assert events[0]["target"] == "scrollContent" diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/context.py b/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/context.py new file mode 100644 index 0000000000..ba2ddd1471 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/context.py @@ -0,0 +1,42 @@ +import pytest +from webdriver.bidi.modules.input import Actions +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.helpers import filter_supported_key_events +from .. 
import get_events + +pytestmark = pytest.mark.asyncio + + +async def test_different_top_level_contexts( + bidi_session, new_tab, top_context, load_static_test_page, get_focused_key_input +): + await load_static_test_page(page="test_actions.html") + await get_focused_key_input() + + actions = Actions() + actions.add_key().key_down("a") + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + # Reset so we only see the release events + await bidi_session.script.evaluate( + expression="resetEvents()", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + # Release actions in another context + await bidi_session.input.release_actions(context=new_tab["context"]) + + events = await get_events(bidi_session, top_context["context"]) + assert len(events) == 0 + + # Release actions in right context + await bidi_session.input.release_actions(context=top_context["context"]) + + expected = [ + {"code": "KeyA", "key": "a", "type": "keyup"}, + ] + all_events = await get_events(bidi_session, top_context["context"]) + (events, expected) = filter_supported_key_events(all_events, expected) + assert events == expected diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/invalid.py new file mode 100644 index 0000000000..2adc0aa953 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/invalid.py @@ -0,0 +1,16 @@ +import pytest +from webdriver.bidi.error import InvalidArgumentException, NoSuchFrameException + + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("value", [None, True, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, value): + with pytest.raises(InvalidArgumentException): + await bidi_session.input.release_actions(context=value) + + +async def test_params_contexts_value_invalid_value(bidi_session): + with pytest.raises(NoSuchFrameException): + await bidi_session.input.release_actions(context="foo") diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/release.py b/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/release.py new file mode 100644 index 0000000000..2955314e3c --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/release.py @@ -0,0 +1,28 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget + +from .. 
import get_events + +pytestmark = pytest.mark.asyncio + + +async def test_release_no_actions_sends_no_events( + bidi_session, top_context, load_static_test_page, get_focused_key_input +): + await load_static_test_page(page="test_actions.html") + elem = await get_focused_key_input() + + await bidi_session.input.release_actions(context=top_context["context"]) + + keys = await bidi_session.script.call_function( + function_declaration="""(elem) => { + return elem.value; + }""", + arguments=[elem], + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + events = await get_events(bidi_session, top_context["context"]) + + assert len(keys["value"]) == 0 + assert len(events) == 0 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/sequence.py b/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/sequence.py new file mode 100644 index 0000000000..9b4535c1b5 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/input/release_actions/sequence.py @@ -0,0 +1,82 @@ +import pytest +from webdriver.bidi.modules.input import Actions +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.helpers import filter_dict, filter_supported_key_events +from .. import get_events + +pytestmark = pytest.mark.asyncio + + +async def test_release_char_sequence_sends_keyup_events_in_reverse( + bidi_session, top_context, load_static_test_page, get_focused_key_input +): + await load_static_test_page(page="test_actions.html") + await get_focused_key_input() + + actions = Actions() + actions.add_key().key_down("a").key_down("b") + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + # Reset so we only see the release events + await bidi_session.script.evaluate( + expression="resetEvents()", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + await bidi_session.input.release_actions(context=top_context["context"]) + expected = [ + {"code": "KeyB", "key": "b", "type": "keyup"}, + {"code": "KeyA", "key": "a", "type": "keyup"}, + ] + all_events = await get_events(bidi_session, top_context["context"]) + (events, expected) = filter_supported_key_events(all_events, expected) + assert events == expected + + +@pytest.mark.parametrize( + "release_actions", + [True, False], + ids=["with release actions", "without release actions"], +) +async def test_release_mouse_sequence_resets_dblclick_state( + bidi_session, + top_context, + get_element, + load_static_test_page, + release_actions +): + await load_static_test_page(page="test_actions.html") + reporter = await get_element("#outer") + + actions = Actions() + actions.add_pointer(pointer_type="mouse").pointer_move( + x=0, y=0, origin=reporter["value"] + ).pointer_down(button=0).pointer_up(button=0) + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + + if release_actions: + await bidi_session.input.release_actions(context=top_context["context"]) + + await bidi_session.input.perform_actions( + actions=actions, context=top_context["context"] + ) + events = await get_events(bidi_session, top_context["context"]) + + expected = [ + {"type": "mousedown", "button": 0}, + {"type": "mouseup", "button": 0}, + {"type": "click", "button": 0}, + {"type": "mousedown", "button": 0}, + {"type": "mouseup", "button": 0}, + {"type": "click", "button": 0}, + ] + + if not release_actions: + expected.append({"type": "dblclick", "button": 0}) + + filtered_events = [filter_dict(e, expected[0]) 
for e in events] + assert expected == filtered_events[1:] diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/log/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/__init__.py new file mode 100644 index 0000000000..6bc6ebc407 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/__init__.py @@ -0,0 +1,129 @@ +from webdriver.bidi.modules.script import ContextTarget + +from ... import ( + any_int, + any_list, + any_string, + create_console_api_message, + recursive_compare, +) + + +def assert_base_entry( + entry, + level=any_string, + text=any_string, + timestamp=any_int, + realm=any_string, + context=None, + stacktrace=None +): + recursive_compare({ + "level": level, + "text": text, + "timestamp": timestamp, + "source": { + "realm": realm + } + }, entry) + + if stacktrace is not None: + assert "stackTrace" in entry + assert isinstance(entry["stackTrace"], object) + assert "callFrames" in entry["stackTrace"] + + call_frames = entry["stackTrace"]["callFrames"] + assert isinstance(call_frames, list) + assert len(call_frames) == len(stacktrace) + for index in range(0, len(call_frames)): + assert call_frames[index] == stacktrace[index] + + source = entry["source"] + if context is not None: + assert "context" in source + assert source["context"] == context + + +def assert_console_entry( + entry, + method=any_string, + level=any_string, + text=any_string, + args=any_list, + timestamp=any_int, + realm=any_string, + context=None, + stacktrace=None +): + assert_base_entry( + entry=entry, + level=level, + text=text, + timestamp=timestamp, + realm=realm, + context=context, + stacktrace=stacktrace) + + recursive_compare({ + "type": "console", + "method": method, + "args": args + }, entry) + + +def assert_javascript_entry( + entry, + level=any_string, + text=any_string, + timestamp=any_int, + realm=any_string, + context=None, + stacktrace=None +): + assert_base_entry( + entry=entry, + level=level, + text=text, + timestamp=timestamp, + realm=realm, + stacktrace=stacktrace, + context=context) + + recursive_compare({ + "type": "javascript", + }, entry) + + +async def create_console_api_message_from_string(bidi_session, context, type, value): + await bidi_session.script.evaluate( + expression=f"""console.{type}({value})""", + await_promise=False, + target=ContextTarget(context["context"]), + ) + + +async def create_javascript_error(bidi_session, context, error_message="foo"): + str_remote_value = {"type": "string", "value": error_message} + + result = await bidi_session.script.call_function( + function_declaration="""(error_message) => { + const script = document.createElement("script"); + script.append(document.createTextNode(`(() => { throw new Error("${error_message}") })()`)); + document.body.append(script); + + const err = new Error(error_message); + return err.toString(); + }""", + arguments=[str_remote_value], + await_promise=False, + target=ContextTarget(context["context"]), + ) + + return result["value"] + + +def create_log(bidi_session, context, log_type, text="foo"): + if log_type == "console_api_log": + return create_console_api_message(bidi_session, context, text) + if log_type == "javascript_error": + return create_javascript_error(bidi_session, context, text) diff --git 
a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console.py new file mode 100644 index 0000000000..0105f4dfe3 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console.py @@ -0,0 +1,170 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget + +from . import assert_console_entry, create_console_api_message_from_string +from ... import any_string, int_interval + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "log_argument, expected_text", + [ + ("'TEST'", "TEST"), + ("'TWO', 'PARAMETERS'", "TWO PARAMETERS"), + ("{}", any_string), + ("['1', '2', '3']", any_string), + ("null, undefined", "null undefined"), + ], + ids=[ + "single string", + "two strings", + "empty object", + "array of strings", + "null and undefined", + ], +) +async def test_text_with_argument_variation( + bidi_session, top_context, wait_for_event, log_argument, expected_text, +): + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message_from_string( + bidi_session, top_context, "log", log_argument) + event_data = await on_entry_added + + assert_console_entry(event_data, text=expected_text, context=top_context["context"]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "log_method, expected_level", + [ + ("assert", "error"), + ("debug", "debug"), + ("error", "error"), + ("info", "info"), + ("log", "info"), + ("table", "info"), + ("trace", "debug"), + ("warn", "warn"), + ], +) +async def test_level( + bidi_session, top_context, wait_for_event, log_method, expected_level +): + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + + if log_method == "assert": + # assert has to be called with a first falsy argument to trigger a log. 
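+    # create_console_api_message_from_string builds "console.assert(false, 'foo')" here,
+    # so the falsy first argument makes the assertion fail and emit the log entry.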
+ await create_console_api_message_from_string( + bidi_session, top_context, "assert", "false, 'foo'") + else: + await create_console_api_message_from_string( + bidi_session, top_context, log_method, "'foo'") + + event_data = await on_entry_added + + assert_console_entry( + event_data, text="foo", level=expected_level, method=log_method + ) + + +@pytest.mark.asyncio +async def test_timestamp(bidi_session, top_context, wait_for_event, current_time): + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + + time_start = await current_time() + + script = """new Promise(resolve => { + setTimeout(() => { + console.log('foo'); + resolve(); + }, 100); + }); + """ + await bidi_session.script.evaluate( + expression=script, + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + event_data = await on_entry_added + + time_end = await current_time() + + assert_console_entry(event_data, text="foo", timestamp=int_interval(time_start, time_end)) + + +@pytest.mark.asyncio +async def test_new_context_with_new_window(bidi_session, top_context, wait_for_event): + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message_from_string( + bidi_session, top_context, 'log', "'foo'") + event_data = await on_entry_added + assert_console_entry(event_data, text="foo", context=top_context["context"]) + + new_context = await bidi_session.browsing_context.create(type_hint="tab") + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message_from_string( + bidi_session, new_context, 'log', "'foo_in_new_window'") + event_data = await on_entry_added + assert_console_entry(event_data, text="foo_in_new_window", context=new_context["context"]) + + +@pytest.mark.asyncio +async def test_new_context_with_refresh(bidi_session, top_context, wait_for_event): + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message_from_string( + bidi_session, top_context, 'log', "'foo'") + event_data = await on_entry_added + assert_console_entry(event_data, text="foo", context=top_context["context"]) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=top_context["url"], wait="complete" + ) + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message_from_string( + bidi_session, top_context, 'log', "'foo_after_refresh'") + event_data = await on_entry_added + assert_console_entry( + event_data, text="foo_after_refresh", context=top_context["context"] + ) + + +@pytest.mark.asyncio +async def test_different_contexts( + bidi_session, + top_context, + wait_for_event, + test_page_same_origin_frame, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_same_origin_frame, wait="complete" + ) + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + assert len(contexts[0]["children"]) == 1 + frame_context = contexts[0]["children"][0] + + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message_from_string( + bidi_session, top_context, "log", "'foo'") + event_data = await on_entry_added + assert_console_entry(event_data, text="foo", context=top_context["context"]) + + on_entry_added = wait_for_event("log.entryAdded") + await 
create_console_api_message_from_string( + bidi_session, frame_context, "log", "'bar'") + event_data = await on_entry_added + assert_console_entry(event_data, text="bar", context=frame_context["context"]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console_args.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console_args.py new file mode 100644 index 0000000000..c7c12d160c --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console_args.py @@ -0,0 +1,271 @@ +import pytest + +from . import assert_console_entry, create_console_api_message_from_string +from ... import any_string + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("data,remote_value", [ + ("undefined", {"type": "undefined"}), + ("null", {"type": "null"}), + ("'bar'", {"type": "string", "value": "bar"}), + ("42", {"type": "number", "value": 42}), + ("Number.NaN", {"type": "number", "value": "NaN"}), + ("-0", {"type": "number", "value": "-0"}), + ("Number.POSITIVE_INFINITY", {"type": "number", "value": "Infinity"}), + ("Number.NEGATIVE_INFINITY", {"type": "number", "value": "-Infinity"}), + ("false", {"type": "boolean", "value": False}), + ("42n", {"type": "bigint", "value": "42"}), +], ids=[ + "undefined", + "null", + "string", + "number", + "NaN", + "-0", + "Infinity", + "-Infinity", + "boolean", + "bigint", +]) +async def test_primitive_types( + bidi_session, top_context, wait_for_event, data, remote_value +): + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message_from_string( + bidi_session, top_context, "log", f"'foo', {data}") + event_data = await on_entry_added + args = [ + {"type": "string", "value": "foo"}, + {"type": remote_value["type"]}, + ] + if "value" in remote_value: + args[1].update({"value": remote_value["value"]}) + + # First arg is always the first argument as provided to console.log() + assert_console_entry(event_data, args=args) + + +@pytest.mark.parametrize( + "data, remote_value", + [ + ( + "(Symbol('foo'))", + { + "type": "symbol", + }, + ), + ( + "[1, 'foo', true, new RegExp(/foo/g), [1]]", + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + { + "type": "regexp", + "value": { + "pattern": "foo", + "flags": "g", + }, + }, + {"type": "array", "value": [{"type": "number", "value": 1}]}, + ], + }, + ), + ( + "({'foo': {'bar': 'baz'}, 'qux': 'quux'})", + { + "type": "object", + "value": [ + ["foo", {"type": "object", "value": [['bar', {"type": "string", "value": "baz"}]]}], + ["qux", {"type": "string", "value": "quux"}], + ], + }, + ), + ( + "(function(){})", + { + "type": "function", + }, + ), + ( + "new RegExp(/foo/g)", + { + "type": "regexp", + "value": { + "pattern": "foo", + "flags": "g", + }, + }, + ), + ( + "new Date(1654004849000)", + { + "type": "date", + "value": "2022-05-31T13:47:29.000Z", + }, + ), + ( + "new Map([[1, 2], ['foo', 'bar'], [true, false], ['baz', [1]]])", + { + "type": "map", + "value": [ + [ + {"type": "number", "value": 1}, + {"type": "number", "value": 2}, + ], + ["foo", {"type": "string", "value": "bar"}], + [ + {"type": "boolean", "value": True}, + {"type": "boolean", "value": False}, + ], + [ + "baz", + {"type": "array", "value": [{"type": "number", "value": 1}]}, + ], + ], + }, + ), + ( + "new Set([1, 'foo', true, [1]])", + { + "type": "set", + "value": [ + {"type": "number", "value": 
1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + {"type": "array", "value": [{"type": "number", "value": 1}]}, + ], + }, + ), + ( + "new WeakMap()", + { + "type": "weakmap", + }, + ), + ( + "new WeakSet()", + { + "type": "weakset", + }, + ), + ( + "new Error('SOME_ERROR_TEXT')", + {"type": "error"}, + ), + ( + "Promise.resolve()", + { + "type": "promise", + }, + ), + ( + "new Int32Array()", + { + "type": "typedarray", + }, + ), + ( + "new ArrayBuffer()", + { + "type": "arraybuffer", + }, + ), + ( + "window", + { + "type": "window", + }, + ), + ( + "new URL('https://example.com')", + { + "type": "object", + }, + ), + ], +) +async def test_remote_values( + bidi_session, top_context, wait_for_event, data, remote_value +): + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message_from_string( + bidi_session, top_context, "log", data + ) + event_data = await on_entry_added + arg = {"type": remote_value["type"]} + if "value" in remote_value: + arg["value"] = remote_value["value"] + + # First arg is always the first argument as provided to console.log() + assert_console_entry(event_data, args=[arg]) + + +@pytest.mark.parametrize( + "data, expected", + [ + ( + "document.querySelector('br')", + [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 0, + "attributes": {}, + "shadowRoot": None, + }, + }, + ], + ), + ( + "document.querySelector('#custom-element')", + [ + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": { + "id": "custom-element", + }, + "childNodeCount": 0, + "localName": "custom-element", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": { + "sharedId": any_string, + "type": "node", + }, + }, + }, + ], + ), + ], + ids=["basic", "shadowRoot"], +) +async def test_node( + bidi_session, get_test_page, top_context, wait_for_event, data, expected +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=get_test_page(), wait="complete" + ) + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message_from_string( + bidi_session, top_context, "log", data + ) + event_data = await on_entry_added + + assert_console_entry(event_data, args=expected) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/event_buffer.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/event_buffer.py new file mode 100644 index 0000000000..99a95df104 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/event_buffer.py @@ -0,0 +1,97 @@ +import asyncio + +import pytest + +from . import assert_base_entry, create_log + + +@pytest.mark.asyncio +@pytest.mark.parametrize("log_type", ["console_api_log", "javascript_error"]) +async def test_console_log_cached_messages( + bidi_session, wait_for_event, log_type, new_tab +): + # Clear events buffer. 
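+    # A subscribe/unsubscribe round-trip flushes any log.entryAdded events buffered by
+    # earlier activity, so this test starts from an empty buffer.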
+ await bidi_session.session.subscribe(events=["log.entryAdded"]) + await bidi_session.session.unsubscribe(events=["log.entryAdded"]) + + # Log a message before subscribing + expected_text = await create_log(bidi_session, new_tab, log_type, "cached_message") + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Subscribe + await bidi_session.session.subscribe(events=["log.entryAdded"]) + # Cached events are emitted before the subscribe command is finished. + assert len(events) == 1 + + # Check the log.entryAdded event received has the expected properties. + assert_base_entry(events[0], text=expected_text, context=new_tab["context"]) + + # Unsubscribe and re-subscribe + await bidi_session.session.unsubscribe(events=["log.entryAdded"]) + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + # Check that the cached event was not re-emitted. + assert len(events) == 1 + + on_entry_added = wait_for_event("log.entryAdded") + expected_text = await create_log(bidi_session, new_tab, log_type, "live_message") + await on_entry_added + + # Check that we only received the live message. + assert len(events) == 2 + assert_base_entry(events[1], text=expected_text, context=new_tab["context"]) + + # Unsubscribe, log a message and re-subscribe + await bidi_session.session.unsubscribe(events=["log.entryAdded"]) + expected_text = await create_log(bidi_session, new_tab, log_type, "cached_message_2") + + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + # Check that only the newly cached event was emitted + assert len(events) == 3 + assert_base_entry(events[2], text=expected_text, context=new_tab["context"]) + + await bidi_session.session.unsubscribe(events=["log.entryAdded"]) + remove_listener() + + +@pytest.mark.asyncio +@pytest.mark.parametrize("log_type", ["console_api_log", "javascript_error"]) +async def test_console_log_cached_message_after_refresh( + bidi_session, subscribe_events, new_tab, log_type +): + # Clear events buffer. + await bidi_session.session.subscribe(events=["log.entryAdded"]) + await bidi_session.session.unsubscribe(events=["log.entryAdded"]) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Log a message, refresh, log another message and subscribe + expected_text_1 = await create_log(bidi_session, new_tab, log_type, "cached_message_1") + context = new_tab["context"] + await bidi_session.browsing_context.navigate(context=context, + url='about:blank', + wait="complete") + expected_text_2 = await create_log(bidi_session, new_tab, log_type, "cached_message_2") + + await subscribe_events(events=["log.entryAdded"]) + + # Check that only the cached message was retrieved. + assert len(events) == 2 + assert_base_entry(events[0], text=expected_text_1) + assert_base_entry(events[1], text=expected_text_2) + + remove_listener() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/javascript.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/javascript.py new file mode 100644 index 0000000000..fe8a9b6b58 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/javascript.py @@ -0,0 +1,31 @@ +import math +import time + +import pytest + +from . 
import assert_javascript_entry, create_log +from ... import int_interval + + +@pytest.mark.asyncio +async def test_types_and_values( + bidi_session, current_time, inline, top_context, wait_for_event +): + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + + time_start = await current_time() + + expected_text = await create_log(bidi_session, top_context, "javascript_error", "cached_message") + event_data = await on_entry_added + + time_end = await current_time() + + assert_javascript_entry( + event_data, + level="error", + text=expected_text, + timestamp=int_interval(time_start, time_end), + context=top_context["context"], + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/realm.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/realm.py new file mode 100644 index 0000000000..bcfc2eafec --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/realm.py @@ -0,0 +1,32 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget + +from . import assert_console_entry + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "sandbox_name", + ["", "sandbox_1"], + ids=["default realm", "sandbox"], +) +async def test_realm(bidi_session, top_context, wait_for_event, sandbox_name): + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + expected_text = "foo" + result = await bidi_session.script.evaluate( + raw_result=True, + expression=f"console.log('{expected_text}')", + await_promise=False, + target=ContextTarget(top_context["context"], sandbox=sandbox_name), + ) + event_data = await on_entry_added + + assert_console_entry( + event_data, + text=expected_text, + context=top_context["context"], + realm=result["realm"], + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/stacktrace.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/stacktrace.py new file mode 100644 index 0000000000..d226476ef7 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/stacktrace.py @@ -0,0 +1,121 @@ +import pytest + +from . import assert_console_entry, assert_javascript_entry + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "log_method, expect_stack", + [ + ("assert", True), + ("debug", False), + ("error", True), + ("info", False), + ("log", False), + ("table", False), + ("trace", True), + ("warn", True), + ], +) +async def test_console_entry_sync_callstack( + bidi_session, inline, top_context, wait_for_event, log_method, expect_stack +): + if log_method == "assert": + # assert has to be called with a first falsy argument to trigger a log. 
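+    # The inline page presumably calls console.assert(false, "cheese") from nested helper
+    # functions foo() and bar(); the expected_stack frames below assert exactly that chain.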
+ url = inline( + f""" + + """ + ) + else: + url = inline( + f""" + + """ + ) + + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + + if expect_stack: + expected_stack = [ + {"columnNumber": 41, "functionName": "foo", "lineNumber": 4, "url": url}, + {"columnNumber": 33, "functionName": "bar", "lineNumber": 5, "url": url}, + {"columnNumber": 16, "functionName": "", "lineNumber": 6, "url": url}, + ] + else: + expected_stack = None + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + event_data = await on_entry_added + + assert_console_entry( + event_data, + method=log_method, + text="cheese", + stacktrace=expected_stack, + context=top_context["context"], + ) + + # Navigate to a page with no error to avoid polluting the next tests with + # JavaScript errors. + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=inline("

foo"), wait="complete" + ) + + +@pytest.mark.asyncio +async def test_javascript_entry_sync_callstack( + bidi_session, inline, top_context, wait_for_event +): + url = inline( + """ + + """ + ) + + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + + expected_stack = [ + {"columnNumber": 35, "functionName": "foo", "lineNumber": 4, "url": url}, + {"columnNumber": 29, "functionName": "bar", "lineNumber": 5, "url": url}, + {"columnNumber": 12, "functionName": "", "lineNumber": 6, "url": url}, + ] + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + event_data = await on_entry_added + + assert_javascript_entry( + event_data, + level="error", + text="Error: cheese", + stacktrace=expected_stack, + context=top_context["context"], + ) + + # Navigate to a page with no error to avoid polluting the next tests with + # JavaScript errors. + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=inline("

foo"), wait="complete" + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/subscription.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/subscription.py new file mode 100644 index 0000000000..4d604f6877 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/subscription.py @@ -0,0 +1,110 @@ +import asyncio + +import pytest + +from . import assert_base_entry, create_log + + +@pytest.mark.asyncio +@pytest.mark.parametrize("log_type", ["console_api_log", "javascript_error"]) +async def test_subscribe_twice(bidi_session, new_tab, wait_for_event, log_type): + # Subscribe to log.entryAdded twice and check that events are received once. + await bidi_session.session.subscribe(events=["log.entryAdded"]) + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Check for a ConsoleLogEntry. + on_entry_added = wait_for_event("log.entryAdded") + expected_text = await create_log(bidi_session, new_tab, log_type, "text1") + await on_entry_added + + assert len(events) == 1 + assert_base_entry(events[0], text=expected_text) + + # Wait for some time and check the events array again + await asyncio.sleep(0.5) + assert len(events) == 1 + + remove_listener() + + +@pytest.mark.asyncio +@pytest.mark.parametrize("log_type", ["console_api_log", "javascript_error"]) +async def test_subscribe_unsubscribe(bidi_session, new_tab, wait_for_event, log_type): + # Subscribe for log events globally + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + on_entry_added = wait_for_event("log.entryAdded") + await create_log(bidi_session, new_tab, log_type, "some text") + await on_entry_added + + # Unsubscribe from log events globally + await bidi_session.session.unsubscribe(events=["log.entryAdded"]) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + expected_text_0 = await create_log(bidi_session, new_tab, log_type, "text_0") + + # Wait for some time before checking the events array + await asyncio.sleep(0.5) + assert len(events) == 0 + + # Refresh to create a new context + context = new_tab["context"] + await bidi_session.browsing_context.navigate(context=context, + url='about:blank', + wait="complete") + + # Check we still don't receive ConsoleLogEntry events from the new context + expected_text_1 = await create_log(bidi_session, new_tab, log_type, "text_1") + + # Wait for some time before checking the events array + await asyncio.sleep(0.5) + assert len(events) == 0 + + # Refresh to create a new context. Note that we refresh to avoid getting + # cached events from the log event buffer. + context = new_tab["context"] + await bidi_session.browsing_context.navigate(context=context, + url='about:blank', + wait="complete") + + # Check that if we subscribe again, we can receive events + await bidi_session.session.subscribe(events=["log.entryAdded"]) + + # Check buffered events are emitted. 
+ assert len(events) == 2 + + on_entry_added = wait_for_event("log.entryAdded") + expected_text_2 = await create_log(bidi_session, new_tab, log_type, "text_2") + await on_entry_added + + assert len(events) == 3 + assert_base_entry(events[0], text=expected_text_0, context=new_tab["context"]) + assert_base_entry(events[1], text=expected_text_1, context=new_tab["context"]) + assert_base_entry(events[2], text=expected_text_2, context=new_tab["context"]) + + # Check that we also get events from a new context + new_context = await bidi_session.browsing_context.create(type_hint="tab") + + on_entry_added = wait_for_event("log.entryAdded") + expected_text_3 = await create_log(bidi_session, new_context, log_type, "text_3") + await on_entry_added + + assert len(events) == 4 + assert_base_entry(events[3], text=expected_text_3, context=new_context["context"]) + + remove_listener() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/__init__.py new file mode 100644 index 0000000000..ea7472cfd3 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/__init__.py @@ -0,0 +1,221 @@ +from .. import ( + any_bool, + any_dict, + any_int, + any_int_or_null, + any_list, + any_string, + any_string_or_null, + recursive_compare, +) + + +def assert_cookies(request_cookies, expected_cookies): + assert len(request_cookies) == len(expected_cookies) + + # Simple helper to find a cookie by key and value only. + def match_cookie(cookie, expected): + for key in expected: + if cookie[key] != expected[key]: + return False + + return True + + for cookie in expected_cookies: + assert next(c for c in request_cookies if match_cookie(c, cookie)) is not None + + +def assert_headers(request_headers, expected_headers): + # The browser sets request headers, only assert that the expected headers + # are included in the request's headers. + assert len(request_headers) >= len(expected_headers) + for header in expected_headers: + assert next(h for h in request_headers if header == h) is not None + + +def assert_timing_info(timing_info): + recursive_compare( + { + "requestTime": any_int, + "redirectStart": any_int, + "redirectEnd": any_int, + "fetchStart": any_int, + "dnsStart": any_int, + "dnsEnd": any_int, + "connectStart": any_int, + "connectEnd": any_int, + "tlsStart": any_int, + "tlsEnd": any_int, + "requestStart": any_int, + "responseStart": any_int, + "responseEnd": any_int, + }, + timing_info, + ) + + +def assert_request_data(request_data, expected_request): + recursive_compare( + { + "bodySize": any_int_or_null, + "cookies": any_list, + "headers": any_list, + "headersSize": any_int, + "method": any_string, + "request": any_string, + "timings": any_dict, + "url": any_string, + }, + request_data, + ) + + assert_timing_info(request_data["timings"]) + + if "cookies" in expected_request: + assert_cookies(request_data["cookies"], expected_request["cookies"]) + # While recursive_compare tolerates missing entries in dict, arrays + # need to have the exact same number of items, and be in the same order. + # We don't want to assert all headers and cookies, so we do a custom + # assert for each and then delete it before using recursive_compare. 
+ del expected_request["cookies"] + + if "headers" in expected_request: + assert_headers(request_data["headers"], expected_request["headers"]) + # Remove headers before using recursive_compare, see comment for cookies + del expected_request["headers"] + + recursive_compare(expected_request, request_data) + + +def assert_base_parameters( + event, + context=None, + redirect_count=None, + expected_request=None, +): + recursive_compare( + { + "context": any_string_or_null, + "navigation": any_string_or_null, + "redirectCount": any_int, + "request": any_dict, + "timestamp": any_int, + }, + event, + ) + + if context is not None: + assert event["context"] == context + + if redirect_count is not None: + assert event["redirectCount"] == redirect_count + + # Assert request data + if expected_request is not None: + assert_request_data(event["request"], expected_request) + + +def assert_before_request_sent_event( + event, + context=None, + redirect_count=None, + expected_request=None, +): + # Assert initiator + assert isinstance(event["initiator"], dict) + assert isinstance(event["initiator"]["type"], str) + + # Assert base parameters + assert_base_parameters( + event, + context=context, + redirect_count=redirect_count, + expected_request=expected_request, + ) + + +def assert_response_data(response_data, expected_response): + recursive_compare( + { + "bodySize": any_int_or_null, + "bytesReceived": any_int, + "content": { + "size": any_int_or_null, + }, + "fromCache": any_bool, + "headersSize": any_int_or_null, + "protocol": any_string, + "status": any_int, + "statusText": any_string, + "url": any_string, + }, + response_data, + ) + + if "headers" in expected_response: + assert_headers(response_data["headers"], expected_response["headers"]) + # Remove headers before using recursive_compare, see comment for cookies + # in assert_request_data + del expected_response["headers"] + + recursive_compare(expected_response, response_data) + + +def assert_response_event( + event, + context=None, + redirect_count=None, + expected_request=None, + expected_response=None, +): + # Assert response data + any_dict(event["response"]) + if expected_response is not None: + assert_response_data(event["response"], expected_response) + + # Assert base parameters + assert_base_parameters( + event, + context=context, + redirect_count=redirect_count, + expected_request=expected_request, + ) + +# Array of status and status text expected to be available in network events +HTTP_STATUS_AND_STATUS_TEXT = [ + (101, "Switching Protocols"), + (200, "OK"), + (201, "Created"), + (202, "Accepted"), + (203, "Non-Authoritative Information"), + (204, "No Content"), + (205, "Reset Content"), + (206, "Partial Content"), + (300, "Multiple Choices"), + (301, "Moved Permanently"), + (302, "Found"), + (303, "See Other"), + (305, "Use Proxy"), + (307, "Temporary Redirect"), + (400, "Bad Request"), + (401, "Unauthorized"), + (402, "Payment Required"), + (403, "Forbidden"), + (404, "Not Found"), + (405, "Method Not Allowed"), + (406, "Not Acceptable"), + (407, "Proxy Authentication Required"), + (408, "Request Timeout"), + (409, "Conflict"), + (410, "Gone"), + (411, "Length Required"), + (412, "Precondition Failed"), + (415, "Unsupported Media Type"), + (417, "Expectation Failed"), + (500, "Internal Server Error"), + (501, "Not Implemented"), + (502, "Bad Gateway"), + (503, "Service Unavailable"), + (504, "Gateway Timeout"), + (505, "HTTP Version Not Supported"), +] diff --git 
a/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/before_request_sent.py b/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/before_request_sent.py new file mode 100644 index 0000000000..39e5b5a3cf --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/before_request_sent.py @@ -0,0 +1,283 @@ +import asyncio + +import pytest + +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.sync import AsyncPoll + +from .. import assert_before_request_sent_event + +PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html" +PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt" +PAGE_REDIRECT_HTTP_EQUIV = ( + "/webdriver/tests/bidi/network/support/redirect_http_equiv.html" +) +PAGE_REDIRECTED_HTML = "/webdriver/tests/bidi/network/support/redirected.html" + + +@pytest.mark.asyncio +async def test_subscribe_status(bidi_session, top_context, wait_for_event, url, fetch): + await bidi_session.session.subscribe(events=["network.beforeRequestSent"]) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=url(PAGE_EMPTY_HTML), + wait="complete", + ) + + # Track all received network.beforeRequestSent events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener( + "network.beforeRequestSent", on_event + ) + + text_url = url(PAGE_EMPTY_TEXT) + on_before_request_sent = wait_for_event("network.beforeRequestSent") + await fetch(text_url) + await on_before_request_sent + + assert len(events) == 1 + expected_request = {"method": "GET", "url": text_url} + assert_before_request_sent_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + + await bidi_session.session.unsubscribe(events=["network.beforeRequestSent"]) + + # Fetch the text url again, with an additional parameter to bypass the cache + # and check no new event is received. 
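+    # Since no event is expected after unsubscribing, a short sleep is used instead of
+    # waiting for an event before asserting that the events list did not grow.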
+ await fetch(f"{text_url}?nocache") + await asyncio.sleep(0.5) + assert len(events) == 1 + + remove_listener() + + +@pytest.mark.asyncio +async def test_load_page_twice( + bidi_session, top_context, wait_for_event, url, fetch, setup_network_test +): + html_url = url(PAGE_EMPTY_HTML) + + network_events = await setup_network_test(events=["network.beforeRequestSent"]) + events = network_events["network.beforeRequestSent"] + + on_before_request_sent = wait_for_event("network.beforeRequestSent") + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=html_url, + wait="complete", + ) + await on_before_request_sent + + assert len(events) == 1 + expected_request = {"method": "GET", "url": html_url} + assert_before_request_sent_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + + +@pytest.mark.parametrize( + "method", + [ + "GET", + "HEAD", + "POST", + "PUT", + "DELETE", + "OPTIONS", + "PATCH", + ], +) +@pytest.mark.asyncio +async def test_request_method( + bidi_session, wait_for_event, url, fetch, setup_network_test, method +): + text_url = url(PAGE_EMPTY_TEXT) + + network_events = await setup_network_test(events=["network.beforeRequestSent"]) + events = network_events["network.beforeRequestSent"] + + on_before_request_sent = wait_for_event("network.beforeRequestSent") + await fetch(text_url, method=method) + await on_before_request_sent + + assert len(events) == 1 + expected_request = {"method": method, "url": text_url} + assert_before_request_sent_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + + +@pytest.mark.asyncio +async def test_request_headers( + bidi_session, wait_for_event, url, fetch, setup_network_test +): + text_url = url(PAGE_EMPTY_TEXT) + + + network_events = await setup_network_test(events=["network.beforeRequestSent"]) + events = network_events["network.beforeRequestSent"] + + on_before_request_sent = wait_for_event("network.beforeRequestSent") + await fetch(text_url, method="GET", headers={"foo": "bar"}) + await on_before_request_sent + + assert len(events) == 1 + expected_request = { + "headers": ({"name": "foo", "value": "bar"},), + "method": "GET", + "url": text_url, + } + assert_before_request_sent_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + + +@pytest.mark.asyncio +async def test_request_cookies( + bidi_session, top_context, wait_for_event, url, fetch, setup_network_test +): + text_url = url(PAGE_EMPTY_TEXT) + + + network_events = await setup_network_test(events=["network.beforeRequestSent"]) + events = network_events["network.beforeRequestSent"] + + await bidi_session.script.evaluate( + expression="document.cookie = 'foo=bar';", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + on_before_request_sent = wait_for_event("network.beforeRequestSent") + await fetch(text_url, method="GET") + await on_before_request_sent + + assert len(events) == 1 + expected_request = { + "cookies": ({"name": "foo", "value": "bar"},), + "method": "GET", + "url": text_url, + } + assert_before_request_sent_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + + await bidi_session.script.evaluate( + expression="document.cookie = 'fuu=baz';", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + on_before_request_sent = wait_for_event("network.beforeRequestSent") + await fetch(text_url, method="GET") + await on_before_request_sent + + assert len(events) == 2 + + expected_request = { 
+ "cookies": ( + {"name": "foo", "value": "bar"}, + {"name": "fuu", "value": "baz"}, + ), + "method": "GET", + "url": text_url, + } + assert_before_request_sent_event( + events[1], + expected_request=expected_request, + redirect_count=0, + ) + + +@pytest.mark.asyncio +async def test_redirect(bidi_session, wait_for_event, url, fetch, setup_network_test): + text_url = url(PAGE_EMPTY_TEXT) + redirect_url = url( + f"/webdriver/tests/support/http_handlers/redirect.py?location={text_url}" + ) + + + network_events = await setup_network_test(events=["network.beforeRequestSent"]) + events = network_events["network.beforeRequestSent"] + + await fetch(redirect_url, method="GET") + + # Wait until we receive two events, one for the initial request and one for + # the redirection. + wait = AsyncPoll(bidi_session, timeout=2) + await wait.until(lambda _: len(events) >= 2) + + assert len(events) == 2 + expected_request = {"method": "GET", "url": redirect_url} + assert_before_request_sent_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + expected_request = {"method": "GET", "url": text_url} + assert_before_request_sent_event( + events[1], expected_request=expected_request, redirect_count=1 + ) + + # Check that both requests share the same requestId + assert events[0]["request"]["request"] == events[1]["request"]["request"] + + +@pytest.mark.asyncio +async def test_redirect_http_equiv( + bidi_session, top_context, wait_for_event, url, setup_network_test +): + # PAGE_REDIRECT_HTTP_EQUIV should redirect to PAGE_REDIRECTED_HTML immediately + http_equiv_url = url(PAGE_REDIRECT_HTTP_EQUIV) + redirected_url = url(PAGE_REDIRECTED_HTML) + + + network_events = await setup_network_test(events=["network.beforeRequestSent"]) + events = network_events["network.beforeRequestSent"] + + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=http_equiv_url, + wait="complete", + ) + + # Wait until we receive two events, one for the initial request and one for + # the http-equiv "redirect". + wait = AsyncPoll(bidi_session, timeout=2) + await wait.until(lambda _: len(events) >= 2) + + assert len(events) == 2 + expected_request = {"method": "GET", "url": http_equiv_url} + assert_before_request_sent_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + # http-equiv redirect should not be considered as a redirect: redirect_count + # should be 0. + expected_request = {"method": "GET", "url": redirected_url} + assert_before_request_sent_event( + events[1], + expected_request=expected_request, + redirect_count=0, + ) + + # Check that the http-equiv redirect request has a different requestId + assert events[0]["request"]["request"] != events[1]["request"]["request"] diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/combined/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/combined/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/combined/network_events.py b/testing/web-platform/tests/webdriver/tests/bidi/network/combined/network_events.py new file mode 100644 index 0000000000..7f8f134af0 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/combined/network_events.py @@ -0,0 +1,121 @@ +import asyncio + +import pytest + +from .. 
import ( + assert_before_request_sent_event, + assert_response_event, +) + +PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html" +PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt" + + +@pytest.mark.asyncio +async def test_same_request_id( + bidi_session, top_context, wait_for_event, url, setup_network_test, fetch +): + network_events = await setup_network_test( + events=[ + "network.beforeRequestSent", + "network.responseStarted", + "network.responseCompleted", + ] + ) + before_request_sent_events = network_events["network.beforeRequestSent"] + response_started_events = network_events["network.responseStarted"] + response_completed_events = network_events["network.responseCompleted"] + + text_url = url(PAGE_EMPTY_TEXT) + on_response_completed = wait_for_event("network.responseCompleted") + await fetch(text_url) + await on_response_completed + + assert len(before_request_sent_events) == 1 + assert len(response_started_events) == 1 + assert len(response_completed_events) == 1 + expected_request = {"method": "GET", "url": text_url} + assert_before_request_sent_event( + before_request_sent_events[0], expected_request=expected_request + ) + + expected_response = {"url": text_url} + assert_response_event( + response_started_events[0], + expected_request=expected_request, + expected_response=expected_response, + ) + assert_response_event( + response_completed_events[0], + expected_request=expected_request, + expected_response=expected_response, + ) + + assert ( + before_request_sent_events[0]["request"]["request"] + == response_started_events[0]["request"]["request"] + ) + + assert ( + before_request_sent_events[0]["request"]["request"] + == response_completed_events[0]["request"]["request"] + ) + + +@pytest.mark.asyncio +async def test_subscribe_to_one_context( + bidi_session, top_context, wait_for_event, url, fetch, setup_network_test +): + other_context = await bidi_session.browsing_context.create(type_hint="tab") + await bidi_session.browsing_context.navigate( + context=other_context["context"], + url=url(PAGE_EMPTY_HTML), + wait="complete", + ) + + network_events = await setup_network_test( + events=[ + "network.beforeRequestSent", + "network.responseStarted", + "network.responseCompleted", + ], + contexts=[top_context["context"]], + ) + + # Perform a fetch request in the subscribed context and wait for the response completed event. + text_url = url(PAGE_EMPTY_TEXT) + on_response_completed = wait_for_event("network.responseCompleted") + await fetch(text_url, context=top_context) + await on_response_completed + + assert len(network_events["network.beforeRequestSent"]) == 1 + assert len(network_events["network.responseStarted"]) == 1 + assert len(network_events["network.responseCompleted"]) == 1 + + # Check the received events have the correct context. + expected_request = {"method": "GET", "url": text_url} + expected_response = {"url": text_url} + assert_before_request_sent_event( + network_events["network.beforeRequestSent"][0], + expected_request=expected_request, + context=top_context["context"], + ) + assert_response_event( + network_events["network.responseStarted"][0], + expected_response=expected_response, + context=top_context["context"], + ) + assert_response_event( + network_events["network.responseCompleted"][0], + expected_response=expected_response, + context=top_context["context"], + ) + + # Perform another fetch request in the other context. 
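+    # The subscription above was limited to top_context, so this request performed in
+    # other_context must not produce any additional network events.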
+ await fetch(text_url, context=other_context) + await asyncio.sleep(0.5) + + # Check that no other event was received. + assert len(network_events["network.beforeRequestSent"]) == 1 + assert len(network_events["network.responseStarted"]) == 1 + assert len(network_events["network.responseCompleted"]) == 1 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/conftest.py b/testing/web-platform/tests/webdriver/tests/bidi/network/conftest.py new file mode 100644 index 0000000000..5a6c6551ff --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/conftest.py @@ -0,0 +1,98 @@ +import json + +import pytest +import pytest_asyncio + +from webdriver.bidi.modules.script import ContextTarget + +RESPONSE_COMPLETED_EVENT = "network.responseCompleted" + +PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html" + + +@pytest.fixture +def fetch(bidi_session, top_context, configuration): + """Perform a fetch from the page of the provided context, default to the + top context. + """ + async def fetch(url, method="GET", headers=None, context=top_context, timeout_in_seconds=3): + method_arg = f"method: '{method}'," + + headers_arg = "" + if headers != None: + headers_arg = f"headers: {json.dumps(headers)}," + + timeout_in_seconds = timeout_in_seconds * configuration["timeout_multiplier"] + + # Wait for fetch() to resolve a response and for response.text() to + # resolve as well to make sure the request/response is completed when + # the helper returns. + await bidi_session.script.evaluate( + expression=f""" + {{ + const controller = new AbortController(); + setTimeout(() => controller.abort(), {timeout_in_seconds * 1000}); + fetch("{url}", {{ + {method_arg} + {headers_arg} + signal: controller.signal + }}).then(response => response.text()); + }}""", + target=ContextTarget(context["context"]), + await_promise=True, + ) + + return fetch + + +@pytest_asyncio.fixture +async def setup_network_test( + bidi_session, subscribe_events, wait_for_event, top_context, url +): + """Navigate the current top level context to the provided url and subscribe + to network.beforeRequestSent. + + Returns an `events` dictionary in which the captured network events will be added. + The keys of the dictionary are network event names (eg. "network.beforeRequestSent"), + and the value is an array of collected events. + """ + listeners = [] + + async def _setup_network_test(events, test_url=url(PAGE_EMPTY_HTML), contexts=None): + nonlocal listeners + + # Listen for network.responseCompleted for the initial navigation to + # make sure this event will not be captured unexpectedly by the tests. 
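+        # This temporary subscription is scoped to the top-level context and is removed
+        # again right after the navigation's responseCompleted event has been received.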
+ await bidi_session.session.subscribe( + events=[RESPONSE_COMPLETED_EVENT], contexts=[top_context["context"]] + ) + on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=test_url, + wait="complete", + ) + await on_response_completed + await bidi_session.session.unsubscribe( + events=[RESPONSE_COMPLETED_EVENT], contexts=[top_context["context"]] + ) + + await subscribe_events(events, contexts) + + network_events = {} + for event in events: + network_events[event] = [] + + async def on_event(method, data, event=event): + network_events[event].append(data) + + listeners.append(bidi_session.add_event_listener(event, on_event)) + + return network_events + + yield _setup_network_test + + # cleanup + for remove_listener in listeners: + remove_listener() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed.py new file mode 100644 index 0000000000..1df23cda59 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed.py @@ -0,0 +1,264 @@ +import asyncio +import json + +import pytest + +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.sync import AsyncPoll + +from ... import any_int +from .. import assert_response_event, HTTP_STATUS_AND_STATUS_TEXT + +PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html" +PAGE_EMPTY_IMAGE = "/webdriver/tests/bidi/network/support/empty.png" +PAGE_EMPTY_SCRIPT = "/webdriver/tests/bidi/network/support/empty.js" +PAGE_EMPTY_SVG = "/webdriver/tests/bidi/network/support/empty.svg" +PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt" + +RESPONSE_COMPLETED_EVENT = "network.responseCompleted" + + +@pytest.mark.asyncio +async def test_subscribe_status(bidi_session, top_context, wait_for_event, url, fetch): + await bidi_session.session.subscribe(events=[RESPONSE_COMPLETED_EVENT]) + + # Track all received network.responseCompleted events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener( + RESPONSE_COMPLETED_EVENT, on_event + ) + + html_url = url(PAGE_EMPTY_HTML) + on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT) + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=html_url, + wait="complete", + ) + await on_response_completed + + assert len(events) == 1 + expected_request = {"method": "GET", "url": html_url} + expected_response = { + "url": url(PAGE_EMPTY_HTML), + "fromCache": False, + "mimeType": "text/html", + "status": 200, + "statusText": "OK", + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + redirect_count=0, + ) + + text_url = url(PAGE_EMPTY_TEXT) + on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT) + await fetch(text_url) + await on_response_completed + + assert len(events) == 2 + expected_request = {"method": "GET", "url": text_url} + expected_response = { + "url": text_url, + "fromCache": False, + "mimeType": "text/plain", + "status": 200, + 
"statusText": "OK", + } + assert_response_event( + events[1], + expected_request=expected_request, + expected_response=expected_response, + redirect_count=0, + ) + + await bidi_session.session.unsubscribe(events=[RESPONSE_COMPLETED_EVENT]) + + # Fetch the text url again, with an additional parameter to bypass the cache + # and check no new event is received. + await fetch(f"{text_url}?nocache") + await asyncio.sleep(0.5) + assert len(events) == 2 + + remove_listener() + + +@pytest.mark.asyncio +async def test_load_page_twice( + bidi_session, top_context, wait_for_event, url, fetch, setup_network_test +): + html_url = url(PAGE_EMPTY_HTML) + + network_events = await setup_network_test(events=[RESPONSE_COMPLETED_EVENT]) + events = network_events[RESPONSE_COMPLETED_EVENT] + + on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT) + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=html_url, + wait="complete", + ) + await on_response_completed + + assert len(events) == 1 + expected_request = {"method": "GET", "url": html_url} + expected_response = { + "url": html_url, + "fromCache": False, + "mimeType": "text/html", + "status": 200, + "statusText": "OK", + "protocol": "http/1.1", + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + redirect_count=0, + ) + + +@pytest.mark.parametrize( + "status, status_text", + HTTP_STATUS_AND_STATUS_TEXT, +) +@pytest.mark.asyncio +async def test_response_status( + bidi_session, wait_for_event, url, fetch, setup_network_test, status, status_text +): + status_url = url( + f"/webdriver/tests/support/http_handlers/status.py?status={status}&nocache={RESPONSE_COMPLETED_EVENT}" + ) + + network_events = await setup_network_test(events=[RESPONSE_COMPLETED_EVENT]) + events = network_events[RESPONSE_COMPLETED_EVENT] + + on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT) + await fetch(status_url) + await on_response_completed + + assert len(events) == 1 + expected_request = {"method": "GET", "url": status_url} + expected_response = { + "url": status_url, + "fromCache": False, + "mimeType": "text/plain", + "status": status, + "statusText": status_text, + "protocol": "http/1.1", + } + assert_response_event( + events[0], + expected_response=expected_response, + redirect_count=0, + ) + + +@pytest.mark.asyncio +async def test_response_headers( + bidi_session, wait_for_event, url, fetch, setup_network_test +): + headers_url = url( + "/webdriver/tests/support/http_handlers/headers.py?header=foo:bar&header=baz:biz" + ) + + network_events = await setup_network_test(events=[RESPONSE_COMPLETED_EVENT]) + events = network_events[RESPONSE_COMPLETED_EVENT] + + on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT) + await fetch(headers_url, method="GET") + await on_response_completed + + assert len(events) == 1 + + expected_request = {"method": "GET", "url": headers_url} + expected_response = { + "url": headers_url, + "fromCache": False, + "mimeType": "text/plain", + "status": 200, + "statusText": "OK", + "headers": ( + {"name": "foo", "value": "bar"}, + {"name": "baz", "value": "biz"}, + ), + "protocol": "http/1.1", + } + assert_response_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + + +@pytest.mark.parametrize( + "page_url, mime_type", + [ + (PAGE_EMPTY_HTML, "text/html"), + (PAGE_EMPTY_TEXT, "text/plain"), + (PAGE_EMPTY_SCRIPT, "text/javascript"), + (PAGE_EMPTY_IMAGE, "image/png"), + (PAGE_EMPTY_SVG, 
"image/svg+xml"), + ], +) +@pytest.mark.asyncio +async def test_response_mime_type_file( + bidi_session, url, wait_for_event, fetch, setup_network_test, page_url, mime_type +): + network_events = await setup_network_test(events=[RESPONSE_COMPLETED_EVENT]) + events = network_events[RESPONSE_COMPLETED_EVENT] + + on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT) + await fetch(url(page_url), method="GET") + await on_response_completed + + assert len(events) == 1 + + expected_request = {"method": "GET", "url": url(page_url)} + expected_response = {"url": url(page_url), "mimeType": mime_type} + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + redirect_count=0, + ) + + +@pytest.mark.asyncio +async def test_redirect(bidi_session, wait_for_event, url, fetch, setup_network_test): + text_url = url(PAGE_EMPTY_TEXT) + redirect_url = url( + f"/webdriver/tests/support/http_handlers/redirect.py?location={text_url}" + ) + + network_events = await setup_network_test(events=[RESPONSE_COMPLETED_EVENT]) + events = network_events[RESPONSE_COMPLETED_EVENT] + + await fetch(redirect_url, method="GET") + + # Wait until we receive two events, one for the initial request and one for + # the redirection. + wait = AsyncPoll(bidi_session, timeout=2) + await wait.until(lambda _: len(events) >= 2) + + assert len(events) == 2 + expected_request = {"method": "GET", "url": redirect_url} + assert_response_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + expected_request = {"method": "GET", "url": text_url} + assert_response_event( + events[1], expected_request=expected_request, redirect_count=1 + ) + + # Check that both requests share the same requestId + assert events[0]["request"]["request"] == events[1]["request"]["request"] diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed_cached.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed_cached.py new file mode 100644 index 0000000000..eb34488508 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed_cached.py @@ -0,0 +1,196 @@ +import pytest +import random + +from tests.support.sync import AsyncPoll + +from .. import assert_response_event + +PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt" + + +@pytest.mark.asyncio +async def test_cached( + bidi_session, + top_context, + wait_for_event, + url, + fetch, + setup_network_test, +): + network_events = await setup_network_test( + events=[ + "network.responseCompleted", + ] + ) + events = network_events["network.responseCompleted"] + + cached_url = url( + f"/webdriver/tests/support/http_handlers/cached.py?status=200&nocache={random.random()}" + ) + on_response_completed = wait_for_event("network.responseCompleted") + await fetch(cached_url) + await on_response_completed + + assert len(events) == 1 + expected_request = {"method": "GET", "url": cached_url} + + # The first request/response is used to fill the browser cache, so we expect + # fromCache to be False here. 
+ expected_response = { + "url": cached_url, + "fromCache": False, + "status": 200, + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + ) + + on_response_completed = wait_for_event("network.responseCompleted") + await fetch(cached_url) + await on_response_completed + + assert len(events) == 2 + + # The second request for the same URL has to be read from the local cache. + expected_response = { + "url": cached_url, + "fromCache": True, + "status": 200, + } + assert_response_event( + events[1], + expected_request=expected_request, + expected_response=expected_response, + ) + + +@pytest.mark.asyncio +async def test_cached_redirect( + bidi_session, + top_context, + wait_for_event, + url, + fetch, + setup_network_test, +): + network_events = await setup_network_test( + events=[ + "network.responseCompleted", + ] + ) + events = network_events["network.responseCompleted"] + + text_url = url(PAGE_EMPTY_TEXT) + cached_url = url( + f"/webdriver/tests/support/http_handlers/cached.py?status=301&location={text_url}&nocache={random.random()}" + ) + + await fetch(cached_url) + + # Expect two events, one for the initial request and one for the redirect. + wait = AsyncPoll(bidi_session, timeout=2) + await wait.until(lambda _: len(events) >= 2) + assert len(events) == 2 + + # The first request/response is used to fill the cache, so we expect + # fromCache to be False here. + expected_request = {"method": "GET", "url": cached_url} + expected_response = { + "url": cached_url, + "fromCache": False, + "status": 301, + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + ) + + # The second request is the redirect + redirected_request = {"method": "GET", "url": text_url} + redirected_response = {"url": text_url, "status": 200} + assert_response_event( + events[1], + expected_request=redirected_request, + expected_response=redirected_response, + ) + + await fetch(cached_url) + wait = AsyncPoll(bidi_session, timeout=2) + await wait.until(lambda _: len(events) >= 4) + assert len(events) == 4 + + # The third request hits cached_url again and has to be read from the local cache. 
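+    # Serving the 301 from the cache still causes the redirect to be followed,
+    # which is why a second pair of events (for cached_url and then text_url) is
+    # expected here. (Clarifying comment added; behaviour inferred from the
+    # assertions below.)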
+ expected_response = { + "url": cached_url, + "fromCache": True, + "status": 301, + } + assert_response_event( + events[2], + expected_request=expected_request, + expected_response=expected_response, + ) + + # The fourth request is the redirect + assert_response_event( + events[3], + expected_request=redirected_request, + expected_response=redirected_response, + ) + + +@pytest.mark.asyncio +async def test_cached_revalidate( + bidi_session, top_context, wait_for_event, url, fetch, setup_network_test +): + network_events = await setup_network_test( + events=[ + "network.responseCompleted", + ] + ) + events = network_events["network.responseCompleted"] + + revalidate_url = url( + f"/webdriver/tests/support/http_handlers/must-revalidate.py?nocache={random.random()}" + ) + on_response_completed = wait_for_event("network.responseCompleted") + await fetch(revalidate_url) + await on_response_completed + + assert len(events) == 1 + expected_request = {"method": "GET", "url": revalidate_url} + expected_response = { + "url": revalidate_url, + "fromCache": False, + "status": 200, + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + ) + + on_response_completed = wait_for_event("network.responseCompleted") + + # Note that we pass a specific header so that the must-revalidate.py handler + # can decide to return a 304 without having to use another URL. + await fetch(revalidate_url, headers={"return-304": "true"}) + await on_response_completed + + assert len(events) == 2 + + # Here fromCache should still be false, because for a 304 response the response + # cache state is "validated" and fromCache is only true if cache state is "local" + expected_response = { + "url": revalidate_url, + "fromCache": False, + "status": 304, + } + assert_response_event( + events[1], + expected_request=expected_request, + expected_response=expected_response, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started.py new file mode 100644 index 0000000000..e3fd7a4d35 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started.py @@ -0,0 +1,241 @@ +import asyncio +import json + +import pytest + +from webdriver.bidi.modules.script import ContextTarget + +from tests.support.sync import AsyncPoll + +from ... import any_int +from .. 
import assert_response_event, HTTP_STATUS_AND_STATUS_TEXT + +PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html" +PAGE_EMPTY_IMAGE = "/webdriver/tests/bidi/network/support/empty.png" +PAGE_EMPTY_SCRIPT = "/webdriver/tests/bidi/network/support/empty.js" +PAGE_EMPTY_SVG = "/webdriver/tests/bidi/network/support/empty.svg" +PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt" + +RESPONSE_STARTED_EVENT = "network.responseStarted" + + +@pytest.mark.asyncio +async def test_subscribe_status(bidi_session, top_context, wait_for_event, url, fetch): + await bidi_session.session.subscribe(events=[RESPONSE_STARTED_EVENT]) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=url(PAGE_EMPTY_HTML), + wait="complete", + ) + + # Track all received network.responseStarted events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener( + RESPONSE_STARTED_EVENT, on_event + ) + + text_url = url(PAGE_EMPTY_TEXT) + on_response_started = wait_for_event(RESPONSE_STARTED_EVENT) + await fetch(text_url) + await on_response_started + + assert len(events) == 1 + expected_request = {"method": "GET", "url": text_url} + expected_response = { + "url": text_url, + "fromCache": False, + "mimeType": "text/plain", + "status": 200, + "statusText": "OK", + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + redirect_count=0, + ) + + await bidi_session.session.unsubscribe(events=[RESPONSE_STARTED_EVENT]) + + # Fetch the text url again, with an additional parameter to bypass the cache + # and check no new event is received. + await fetch(f"{text_url}?nocache") + await asyncio.sleep(0.5) + assert len(events) == 1 + + remove_listener() + + +@pytest.mark.asyncio +async def test_load_page_twice( + bidi_session, top_context, wait_for_event, url, fetch, setup_network_test +): + html_url = url(PAGE_EMPTY_HTML) + + network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT]) + events = network_events[RESPONSE_STARTED_EVENT] + + on_response_started = wait_for_event(RESPONSE_STARTED_EVENT) + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=html_url, + wait="complete", + ) + await on_response_started + + assert len(events) == 1 + expected_request = {"method": "GET", "url": html_url} + expected_response = { + "url": html_url, + "fromCache": False, + "mimeType": "text/html", + "status": 200, + "statusText": "OK", + "protocol": "http/1.1", + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + redirect_count=0, + ) + + +@pytest.mark.parametrize( + "status, status_text", + HTTP_STATUS_AND_STATUS_TEXT, +) +@pytest.mark.asyncio +async def test_response_status( + bidi_session, wait_for_event, url, fetch, setup_network_test, status, status_text +): + status_url = url(f"/webdriver/tests/support/http_handlers/status.py?status={status}&nocache={RESPONSE_STARTED_EVENT}") + + network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT]) + events = network_events[RESPONSE_STARTED_EVENT] + + on_response_started = wait_for_event(RESPONSE_STARTED_EVENT) + await fetch(status_url) + await on_response_started + + assert len(events) == 1 + expected_request = {"method": "GET", "url": status_url} + expected_response = { + "url": status_url, + "fromCache": False, + "mimeType": "text/plain", + "status": status, + 
"statusText": status_text, + "protocol": "http/1.1", + } + assert_response_event( + events[0], + expected_response=expected_response, + redirect_count=0, + ) + + +@pytest.mark.asyncio +async def test_response_headers( + bidi_session, wait_for_event, url, fetch, setup_network_test +): + headers_url = url( + "/webdriver/tests/support/http_handlers/headers.py?header=foo:bar&header=baz:biz" + ) + + network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT]) + events = network_events[RESPONSE_STARTED_EVENT] + + on_response_started = wait_for_event(RESPONSE_STARTED_EVENT) + await fetch(headers_url, method="GET") + await on_response_started + + assert len(events) == 1 + + expected_request = {"method": "GET", "url": headers_url} + expected_response = { + "url": headers_url, + "fromCache": False, + "mimeType": "text/plain", + "status": 200, + "statusText": "OK", + "headers": ( + {"name": "foo", "value": "bar"}, + {"name": "baz", "value": "biz"}, + ), + "protocol": "http/1.1", + } + assert_response_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + + +@pytest.mark.parametrize( + "page_url, mime_type", + [ + (PAGE_EMPTY_HTML, "text/html"), + (PAGE_EMPTY_TEXT, "text/plain"), + (PAGE_EMPTY_SCRIPT, "text/javascript"), + (PAGE_EMPTY_IMAGE, "image/png"), + (PAGE_EMPTY_SVG, "image/svg+xml"), + ], +) +@pytest.mark.asyncio +async def test_response_mime_type_file( + bidi_session, url, wait_for_event, fetch, setup_network_test, page_url, mime_type +): + network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT]) + events = network_events[RESPONSE_STARTED_EVENT] + + on_response_started = wait_for_event(RESPONSE_STARTED_EVENT) + await fetch(url(page_url), method="GET") + await on_response_started + + assert len(events) == 1 + + expected_request = {"method": "GET", "url": url(page_url)} + expected_response = {"url": url(page_url), "mimeType": mime_type} + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + redirect_count=0, + ) + + +@pytest.mark.asyncio +async def test_redirect(bidi_session, wait_for_event, url, fetch, setup_network_test): + text_url = url(PAGE_EMPTY_TEXT) + redirect_url = url(f"/webdriver/tests/support/http_handlers/redirect.py?location={text_url}") + + network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT]) + events = network_events[RESPONSE_STARTED_EVENT] + + await fetch(redirect_url, method="GET") + + # Wait until we receive two events, one for the initial request and one for + # the redirection. 
+ wait = AsyncPoll(bidi_session, timeout=2) + await wait.until(lambda _: len(events) >= 2) + + assert len(events) == 2 + expected_request = {"method": "GET", "url": redirect_url} + assert_response_event( + events[0], + expected_request=expected_request, + redirect_count=0, + ) + expected_request = {"method": "GET", "url": text_url} + assert_response_event( + events[1], expected_request=expected_request, redirect_count=1 + ) + + # Check that both requests share the same requestId + assert events[0]["request"]["request"] == events[1]["request"]["request"] diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started_cached.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started_cached.py new file mode 100644 index 0000000000..15373b7107 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started_cached.py @@ -0,0 +1,204 @@ +import pytest +import random + +from tests.support.sync import AsyncPoll + +from .. import assert_response_event + +PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt" + + +@pytest.mark.asyncio +async def test_cached( + bidi_session, + top_context, + wait_for_event, + url, + fetch, + setup_network_test, +): + network_events = await setup_network_test( + events=[ + "network.responseStarted", + ] + ) + events = network_events["network.responseStarted"] + + cached_url = url( + f"/webdriver/tests/support/http_handlers/cached.py?status=200&nocache={random.random()}" + ) + on_response_started = wait_for_event("network.responseStarted") + await fetch(cached_url) + await on_response_started + + assert len(events) == 1 + expected_request = {"method": "GET", "url": cached_url} + + # The first request/response is used to fill the browser cache, so we expect + # fromCache to be False here. + expected_response = { + "url": cached_url, + "fromCache": False, + "status": 200, + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + ) + + on_response_started = wait_for_event("network.responseStarted") + await fetch(cached_url) + await on_response_started + + assert len(events) == 2 + + # The second request for the same URL has to be read from the local cache. + expected_response = { + "url": cached_url, + "fromCache": True, + "status": 200, + } + assert_response_event( + events[1], + expected_request=expected_request, + expected_response=expected_response, + ) + + +@pytest.mark.asyncio +async def test_cached_redirect( + bidi_session, + top_context, + wait_for_event, + url, + fetch, + setup_network_test, +): + network_events = await setup_network_test( + events=[ + "network.responseStarted", + ] + ) + events = network_events["network.responseStarted"] + + text_url = url(PAGE_EMPTY_TEXT) + cached_url = url( + f"/webdriver/tests/support/http_handlers/cached.py?status=301&location={text_url}&nocache={random.random()}" + ) + + await fetch(cached_url) + + # Expect two events, one for the initial request and one for the redirect. + wait = AsyncPoll(bidi_session, timeout=2) + await wait.until(lambda _: len(events) >= 2) + assert len(events) == 2 + + # The first request/response is used to fill the cache, so we expect + # fromCache to be False here. 
+ expected_request = {"method": "GET", "url": cached_url} + expected_response = { + "url": cached_url, + "fromCache": False, + "status": 301, + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + ) + + # The second request is the redirect + redirected_request = {"method": "GET", "url": text_url} + redirected_response = {"url": text_url, "status": 200} + assert_response_event( + events[1], + expected_request=redirected_request, + expected_response=redirected_response, + ) + + await fetch(cached_url) + wait = AsyncPoll(bidi_session, timeout=2) + await wait.until(lambda _: len(events) >= 4) + assert len(events) == 4 + + # The third request hits cached_url again and has to be read from the local cache. + expected_response = { + "url": cached_url, + "fromCache": True, + "status": 301, + } + assert_response_event( + events[2], + expected_request=expected_request, + expected_response=expected_response, + ) + + # The fourth request is the redirect + assert_response_event( + events[3], + expected_request=redirected_request, + expected_response=redirected_response, + ) + + +@pytest.mark.parametrize( + "method", + [ + "GET", + "HEAD", + "OPTIONS", + ], +) +@pytest.mark.asyncio +async def test_cached_revalidate( + bidi_session, top_context, wait_for_event, url, fetch, setup_network_test, method +): + network_events = await setup_network_test( + events=[ + "network.responseStarted", + ] + ) + events = network_events["network.responseStarted"] + + revalidate_url = url( + f"/webdriver/tests/support/http_handlers/must-revalidate.py?nocache={random.random()}" + ) + on_response_started = wait_for_event("network.responseStarted") + await fetch(revalidate_url, method=method) + await on_response_started + + assert len(events) == 1 + expected_request = {"method": method, "url": revalidate_url} + expected_response = { + "url": revalidate_url, + "fromCache": False, + "status": 200, + } + assert_response_event( + events[0], + expected_request=expected_request, + expected_response=expected_response, + ) + + on_response_started = wait_for_event("network.responseStarted") + + # Note that we pass a specific header so that the must-revalidate.py handler + # can decide to return a 304 without having to use another URL. 
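+    # Because the cached response requires revalidation, this second fetch goes
+    # back to the network, where the handler can answer with 304 Not Modified.
+    # (Clarifying comment added; handler behaviour inferred from its name and
+    # from the assertions below.)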
+ await fetch(revalidate_url, method=method, headers={"return-304": "true"}) + await on_response_started + + assert len(events) == 2 + + # Here fromCache should still be false, because for a 304 response the response + # cache state is "validated" and fromCache is only true if cache state is "local" + expected_response = { + "url": revalidate_url, + "fromCache": False, + "status": 304, + } + assert_response_event( + events[1], + expected_request=expected_request, + expected_response=expected_response, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.html b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.html new file mode 100644 index 0000000000..69e9da4114 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.html @@ -0,0 +1,2 @@ + + diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.js b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.js new file mode 100644 index 0000000000..3918c74e44 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.js @@ -0,0 +1 @@ +"use strict"; diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.png b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.png new file mode 100644 index 0000000000..afb763ce9d Binary files /dev/null and b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.png differ diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.svg b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.svg new file mode 100644 index 0000000000..158b3aac16 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.svg @@ -0,0 +1 @@ + diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.txt b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.txt new file mode 100644 index 0000000000..c6cac69265 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.txt @@ -0,0 +1 @@ +empty diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirect_http_equiv.html b/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirect_http_equiv.html new file mode 100644 index 0000000000..9b588c67ef --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirect_http_equiv.html @@ -0,0 +1,4 @@ + + + + diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirected.html b/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirected.html new file mode 100644 index 0000000000..3732b218cf --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirected.html @@ -0,0 +1,2 @@ + +redirected diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/__init__.py new file mode 100644 index 0000000000..de443c4578 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/__init__.py @@ -0,0 +1,62 @@ +from typing import Any, Callable, Mapping + +from .. import any_int, any_string, recursive_compare + + +def assert_handle(obj: Mapping[str, Any], should_contain_handle: bool) -> None: + if should_contain_handle: + assert "handle" in obj, f"Result should contain `handle`. 
Actual: {obj}" + assert isinstance(obj["handle"], str), f"`handle` should be a string, but was {type(obj['handle'])}" + + # Recursively check that handle is not found in any of the nested values. + if "value" in obj: + value = obj["value"] + if type(value) is list: + for v in value: + assert_handle(v, False) + + if type(value) is dict: + for v in value.values(): + assert_handle(v, False) + + else: + assert "handle" not in obj, f"Result should not contain `handle`. Actual: {obj}" + + +def specific_error_response(expected_error: Mapping[str, Any]) -> Callable[[Any], None]: + return lambda actual: recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": expected_error, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + actual) + + +def any_stack_trace(actual: Any) -> None: + assert type(actual) is dict + assert "callFrames" in actual + assert type(actual["callFrames"]) is list + for actual_frame in actual["callFrames"]: + any_stack_frame(actual_frame) + + +def any_stack_frame(actual: Any) -> None: + assert type(actual) is dict + + assert "columnNumber" in actual + any_int(actual["columnNumber"]) + + assert "functionName" in actual + any_string(actual["functionName"]) + + assert "lineNumber" in actual + any_int(actual["lineNumber"]) + + assert "url" in actual + any_string(actual["url"]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/add_preload_script.py b/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/add_preload_script.py new file mode 100644 index 0000000000..98c01554ee --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/add_preload_script.py @@ -0,0 +1,172 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget + + +@pytest.mark.asyncio +@pytest.mark.parametrize("type_hint", ["tab", "window"]) +async def test_add_preload_script( + bidi_session, add_preload_script, top_context, inline, type_hint +): + await add_preload_script(function_declaration="() => { window.foo='bar'; }") + + # Check that preload script didn't apply the changes to the current context + result = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(top_context["context"]), + await_promise=True, + ) + assert result == {"type": "undefined"} + + new_context = await bidi_session.browsing_context.create(type_hint=type_hint) + + # Check that preload script applied the changes to the window + result = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_context["context"]), + await_promise=True, + ) + assert result == {"type": "string", "value": "bar"} + + url = inline("

foo
")
+    await bidi_session.browsing_context.navigate(
+        context=new_context["context"],
+        url=url,
+        wait="complete",
+    )
+
+    # Check that preload script was applied after navigation
+    result = await bidi_session.script.evaluate(
+        expression="window.foo",
+        target=ContextTarget(new_context["context"]),
+        await_promise=True,
+    )
+    assert result == {"type": "string", "value": "bar"}
+
+
+@pytest.mark.asyncio
+async def test_add_same_preload_script_twice(add_preload_script):
+    script_1 = await add_preload_script(function_declaration="() => { return 42; }")
+    script_2 = await add_preload_script(function_declaration="() => { return 42; }")
+
+    # Make sure that preload scripts have different ids
+    assert script_1 != script_2
+
+
+@pytest.mark.asyncio
+async def test_script_order(
+    bidi_session, add_preload_script, subscribe_events, new_tab, inline
+):
+    preload_script_console_text = "preload script"
+
+    await add_preload_script(
+        function_declaration=f"() => {{ console.log('{preload_script_console_text}') }}"
+    )
+    await subscribe_events(events=["log.entryAdded"], contexts=[new_tab["context"]])
+
+    events = []
+
+    async def on_event(method, data):
+        # Ignore errors and warnings which might occur during test execution
+        if data["level"] == "info":
+            events.append(data)
+
+    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
+
+    user_console_text = "user script"
+    url = inline(f"<script>console.log('{user_console_text}')</script>")
+
+    await bidi_session.browsing_context.navigate(
+        context=new_tab["context"],
+        url=url,
+        wait="complete",
+    )
+
+    assert len(events) > 0
+    # Make sure that console event from preload script comes first
+    assert events[0]["text"] == preload_script_console_text
+
+    remove_listener()
+
+
+@pytest.mark.asyncio
+async def test_add_preload_script_in_iframe(
+    bidi_session, add_preload_script, new_tab, test_page_same_origin_frame
+):
+    await add_preload_script(function_declaration="() => { window.bar='foo'; }")
+
+    await bidi_session.browsing_context.navigate(
+        context=new_tab["context"],
+        url=test_page_same_origin_frame,
+        wait="complete",
+    )
+
+    # Check that preload script applied the changes to the window
+    result = await bidi_session.script.evaluate(
+        expression="window.bar",
+        target=ContextTarget(new_tab["context"]),
+        await_promise=True,
+    )
+    assert result == {"type": "string", "value": "foo"}
+
+    contexts = await bidi_session.browsing_context.get_tree(root=new_tab["context"])
+
+    assert len(contexts[0]["children"]) == 1
+    frame_context = contexts[0]["children"][0]
+
+    # Check that preload script applied the changes to the iframe
+    result = await bidi_session.script.evaluate(
+        expression="window.bar",
+        target=ContextTarget(frame_context["context"]),
+        await_promise=True,
+    )
+    assert result == {"type": "string", "value": "foo"}
+
+
+@pytest.mark.asyncio
+async def test_add_preload_script_with_error(
+    bidi_session, add_preload_script, subscribe_events, inline, new_tab, wait_for_event
+):
+    await add_preload_script(
+        function_declaration=f"() => {{ throw Error('error in preload script') }}"
+    )
+
+    await subscribe_events(events=["browsingContext.load", "log.entryAdded"])
+
+    on_entry = wait_for_event("log.entryAdded")
+    on_load = wait_for_event("browsingContext.load")
+
+    url = inline("
foo
") + await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url) + error_event = await on_entry + + # Make sure that page is loaded + await on_load + + # Make sure that exception from preloaded script was reported + assert error_event["level"] == "error" + assert error_event["text"] == "Error: error in preload script" + + +@pytest.mark.asyncio +async def test_page_script_can_access_preload_script_properties( + bidi_session, add_preload_script, new_tab, inline +): + await add_preload_script( + function_declaration="() => { window.preloadScriptFunction = () => window.baz = 42; }" + ) + + url = inline("") + await bidi_session.browsing_context.navigate( + context=new_tab["context"], + url=url, + wait="complete", + ) + + # Check that page script could access a function set up by the preload script + result = await bidi_session.script.evaluate( + expression="window.baz", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + assert result == {"type": "number", "value": 42} diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/arguments.py b/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/arguments.py new file mode 100644 index 0000000000..81f9036827 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/arguments.py @@ -0,0 +1,236 @@ +import pytest +from tests.support.sync import AsyncPoll +from webdriver.bidi.modules.script import ContextTarget + +from ... import any_string, recursive_compare + + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "channel, expected_data", + [ + ( + {"type": "channel", "value": {"channel": "channel_name"}}, + { + "type": "object", + "value": [ + ["foo", {"type": "string", "value": "bar"}], + [ + "baz", + { + "type": "object", + "value": [["1", {"type": "number", "value": 2}]], + }, + ], + ], + }, + ), + ( + { + "type": "channel", + "value": { + "channel": "channel_name", + "serializationOptions": {"maxObjectDepth": 0}, + }, + }, + {"type": "object"}, + ), + ( + { + "type": "channel", + "value": {"channel": "channel_name", "ownership": "root"}, + }, + { + "handle": any_string, + "type": "object", + "value": [ + ["foo", {"type": "string", "value": "bar"}], + [ + "baz", + { + "type": "object", + "value": [["1", {"type": "number", "value": 2}]], + }, + ], + ], + }, + ), + ], + ids=["default", "with serializationOptions", "with ownership"], +) +async def test_channel( + bidi_session, + subscribe_events, + wait_for_event, + add_preload_script, + channel, + expected_data, +): + await subscribe_events(["script.message"]) + + on_script_message = wait_for_event("script.message") + await add_preload_script( + function_declaration="""(channel) => channel({'foo': 'bar', 'baz': {'1': 2}})""", + arguments=[channel], + ) + + new_tab = await bidi_session.browsing_context.create(type_hint="tab") + event_data = await on_script_message + + recursive_compare( + { + "channel": "channel_name", + "data": expected_data, + "source": { + "realm": any_string, + "context": new_tab["context"], + }, + }, + event_data, + ) + + +async def test_channel_with_multiple_arguments( + bidi_session, subscribe_events, wait_for_event, add_preload_script +): + await subscribe_events(["script.message"]) + + on_script_message = wait_for_event("script.message") + await add_preload_script( + function_declaration="""(channel) => channel('will_be_send', 'will_be_ignored')""", + arguments=[{"type": "channel", "value": {"channel": "channel_name"}}], + ) + + 
new_tab = await bidi_session.browsing_context.create(type_hint="tab") + event_data = await on_script_message + + recursive_compare( + { + "channel": "channel_name", + "data": {"type": "string", "value": "will_be_send"}, + "source": { + "realm": any_string, + "context": new_tab["context"], + }, + }, + event_data, + ) + + +async def test_mutation_observer( + bidi_session, + subscribe_events, + wait_for_event, + new_tab, + inline, + add_preload_script, +): + await subscribe_events(["script.message"]) + + on_script_message = wait_for_event("script.message") + await add_preload_script( + function_declaration="""(channel) => { + const onMutation = (mutationList) => mutationList.forEach(mutation => { + const attributeName = mutation.attributeName; + const newValue = mutation.target.getAttribute(mutation.attributeName); + channel({ attributeName, newValue }); + }); + const observer = new MutationObserver(onMutation); + observer.observe(document, { attributes: true, subtree: true }); + }""", + arguments=[{"type": "channel", "value": {"channel": "channel_name"}}], + ) + + url = inline("
<div>foo</div>
") + await bidi_session.browsing_context.navigate( + context=new_tab["context"], + url=url, + wait="complete", + ) + + restult = await bidi_session.script.evaluate( + raw_result=True, + expression="document.querySelector('div').setAttribute('class', 'mutated')", + await_promise=True, + target=ContextTarget(new_tab["context"]), + ) + + event_data = await on_script_message + + recursive_compare( + { + "channel": "channel_name", + "data": { + "type": "object", + "value": [ + ["attributeName", {"type": "string", "value": "class"}], + ["newValue", {"type": "string", "value": "mutated"}], + ], + }, + "source": { + "realm": restult["realm"], + "context": new_tab["context"], + }, + }, + event_data, + ) + + +async def test_two_channels( + bidi_session, + subscribe_events, + add_preload_script, +): + await subscribe_events(["script.message"]) + + # Track all received script.message events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("script.message", on_event) + + await add_preload_script( + function_declaration="""(channel_1, channel_2) => { + channel_1('message_from_channel_1'); + channel_2('message_from_channel_2') + }""", + arguments=[ + {"type": "channel", "value": {"channel": "channel_name_1"}}, + {"type": "channel", "value": {"channel": "channel_name_2"}}, + ], + ) + + new_tab = await bidi_session.browsing_context.create(type_hint="tab") + # Wait for both events + wait = AsyncPoll(bidi_session, timeout=0.5) + await wait.until(lambda _: len(events) == 2) + + recursive_compare( + { + "channel": "channel_name_1", + "data": {"type": "string", "value": "message_from_channel_1"}, + "source": { + "realm": any_string, + "context": new_tab["context"], + }, + }, + events[0], + ) + + recursive_compare( + { + "channel": "channel_name_2", + "data": {"type": "string", "value": "message_from_channel_2"}, + "source": { + "realm": any_string, + "context": new_tab["context"], + }, + }, + events[1], + ) + + remove_listener() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/invalid.py new file mode 100644 index 0000000000..54440ff678 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/invalid.py @@ -0,0 +1,194 @@ +import pytest +import webdriver.bidi.error as error + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("function_declaration", [None, False, 42, {}, []]) +async def test_params_function_declaration_invalid_type( + bidi_session, function_declaration +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration=function_declaration + ), + + +@pytest.mark.parametrize("arguments", [False, "SOME_STRING", 42, {}]) +async def test_params_arguments_invalid_type(bidi_session, arguments): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=arguments, + ) + + +@pytest.mark.parametrize("argument", [False, "SOME_STRING", 42, {}, []]) +async def test_params_arguments_entry_invalid_type(bidi_session, argument): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[argument], + ) + + +async def test_params_arguments_entry_invalid_value(bidi_session): + with 
pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[{"type": "foo"}], + ) + + +@pytest.mark.parametrize("value", [None, False, "_UNKNOWN_", 42, []]) +async def test_params_arguments_channel_value_invalid_type(bidi_session, value): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[{"type": "channel", "value": value}], + ) + + +@pytest.mark.parametrize("channel", [None, False, 42, [], {}]) +async def test_params_arguments_channel_id_invalid_type(bidi_session, channel): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[{"type": "channel", "value": {"channel": channel}}], + ) + + +@pytest.mark.parametrize("ownership", [False, 42, {}, []]) +async def test_params_arguments_channel_ownership_invalid_type(bidi_session, ownership): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[{"type": "channel", "value": {"ownership": ownership}}], + ) + + +async def test_params_arguments_channel_ownership_invalid_value(bidi_session): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[{"type": "channel", "value": {"ownership": "_UNKNOWN_"}}], + ) + + +@pytest.mark.parametrize("serialization_options", [False, "_UNKNOWN_", 42, []]) +async def test_params_arguments_channel_serialization_options_invalid_type( + bidi_session, serialization_options +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[ + { + "type": "channel", + "value": {"serializationOptions": serialization_options}, + } + ], + ) + + +@pytest.mark.parametrize("max_dom_depth", [False, "_UNKNOWN_", {}, []]) +async def test_params_arguments_channel_max_dom_depth_invalid_type( + bidi_session, max_dom_depth +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[ + { + "type": "channel", + "value": {"serializationOptions": {"maxDomDepth": max_dom_depth}}, + } + ], + ) + + +async def test_params_arguments_channel_max_dom_depth_invalid_value(bidi_session): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[ + { + "type": "channel", + "value": {"serializationOptions": {"maxDomDepth": -1}}, + } + ], + ) + + +@pytest.mark.parametrize("max_object_depth", [False, "_UNKNOWN_", {}, []]) +async def test_params_arguments_channel_max_object_depth_invalid_type( + bidi_session, max_object_depth +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[ + { + "type": "channel", + "value": { + "serializationOptions": {"maxObjectDepth": max_object_depth} + }, + } + ], + ) + + +async def test_params_arguments_channel_max_object_depth_invalid_value(bidi_session): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[ + { + "type": "channel", + "value": {"serializationOptions": {"maxObjectDepth": -1}}, + } + ], 
+ ) + + +@pytest.mark.parametrize("include_shadow_tree", [False, 42, {}, []]) +async def test_params_arguments_channel_include_shadow_tree_invalid_type( + bidi_session, include_shadow_tree +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[ + { + "type": "channel", + "value": { + "serializationOptions": { + "includeShadowTree": include_shadow_tree + } + }, + } + ], + ) + + +async def test_params_arguments_channel_include_shadow_tree_invalid_value(bidi_session): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", + arguments=[ + { + "type": "channel", + "value": { + "serializationOptions": {"includeShadowTree": "_UNKNOWN_"} + }, + } + ], + ) + + +@pytest.mark.parametrize("sandbox", [False, 42, {}, []]) +async def test_params_sandbox_invalid_type(bidi_session, sandbox): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.add_preload_script( + function_declaration="() => {}", sandbox=sandbox + ), diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/sandbox.py b/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/sandbox.py new file mode 100644 index 0000000000..364eb5ce1a --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/add_preload_script/sandbox.py @@ -0,0 +1,70 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget + + +@pytest.mark.asyncio +async def test_add_preload_script_to_sandbox(bidi_session, add_preload_script): + # Add preload script to make changes in window + await add_preload_script(function_declaration="() => { window.foo = 1; }") + # Add preload script to make changes in sandbox + await add_preload_script( + function_declaration="() => { window.bar = 2; }", sandbox="sandbox" + ) + + new_tab = await bidi_session.browsing_context.create(type_hint="tab") + + # Check that changes from the first preload script are not present in sandbox + result_in_sandbox = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=True, + ) + assert result_in_sandbox == {"type": "undefined"} + + # Make sure that changes from the second preload script are not present in window + result = await bidi_session.script.evaluate( + expression="window.bar", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + assert result == {"type": "undefined"} + + # Make sure that changes from the second preload script are present in sandbox + result_in_sandbox = await bidi_session.script.evaluate( + expression="window.bar", + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=True, + ) + assert result_in_sandbox == {"type": "number", "value": 2} + + +@pytest.mark.asyncio +async def test_remove_properties_set_by_preload_script( + bidi_session, add_preload_script, new_tab, inline +): + await add_preload_script(function_declaration="() => { window.foo = 42 }") + await add_preload_script(function_declaration="() => { window.foo = 50 }", sandbox="sandbox_1") + + url = inline("") + await bidi_session.browsing_context.navigate( + context=new_tab["context"], + url=url, + wait="complete", + ) + + # Check that page script could access a function set up by the preload script + result = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab["context"]), + 
await_promise=True, + ) + assert result == {"type": "undefined"} + + # Check that page script could access a function set up by the preload script + result = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab["context"], sandbox="sandbox_1"), + await_promise=True, + ) + assert result == {"type": "number", "value": 50} diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/arguments.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/arguments.py new file mode 100644 index 0000000000..0cb571038b --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/arguments.py @@ -0,0 +1,678 @@ +import pytest +from tests.support.sync import AsyncPoll +import webdriver.bidi.error as error +from webdriver.bidi.modules.script import ContextTarget, SerializationOptions + +from ... import any_string, recursive_compare + + +@pytest.mark.asyncio +async def test_default_arguments(bidi_session, top_context): + result = await bidi_session.script.call_function( + function_declaration="(...args) => args", + await_promise=False, + target=ContextTarget(top_context["context"])) + + recursive_compare({ + "type": "array", + "value": [] + }, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "argument, expected", + [ + ({"type": "undefined"}, "undefined"), + ({"type": "null"}, "null"), + ({"type": "string", "value": "foobar"}, "'foobar'"), + ({"type": "string", "value": "2"}, "'2'"), + ({"type": "number", "value": "-0"}, "-0"), + ({"type": "number", "value": "Infinity"}, "Infinity"), + ({"type": "number", "value": "-Infinity"}, "-Infinity"), + ({"type": "number", "value": 3}, "3"), + ({"type": "number", "value": 1.4}, "1.4"), + ({"type": "boolean", "value": True}, "true"), + ({"type": "boolean", "value": False}, "false"), + ({"type": "bigint", "value": "42"}, "42n"), + ], +) +async def test_primitive_value(bidi_session, top_context, argument, expected): + result = await bidi_session.script.call_function( + function_declaration=f"""(arg) => {{ + if (arg !== {expected}) {{ + throw new Error(`Argument should be {expected}, but was ` + arg); + }} + return arg; + }}""", + arguments=[argument], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + recursive_compare(argument, result) + + +@pytest.mark.asyncio +async def test_primitive_value_NaN(bidi_session, top_context): + nan_remote_value = {"type": "number", "value": "NaN"} + result = await bidi_session.script.call_function( + function_declaration="""(arg) => { + if (!isNaN(arg)) { + throw new Error("Argument should be 'NaN', but was " + arg); + } + return arg; + }""", + arguments=[nan_remote_value], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + recursive_compare(nan_remote_value, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "argument, expected_type", + [ + ({ + "type": "array", + "value": [ + {"type": "string", "value": "foobar"}, + ], + }, + "Array" + ), + ({"type": "date", "value": "2022-05-31T13:47:29.000Z"}, + "Date" + ), + ({ + "type": "map", + "value": [ + ["foobar", {"type": "string", "value": "foobar"}], + ], + }, + "Map" + ), + ({ + "type": "object", + "value": [ + ["foobar", {"type": "string", "value": "foobar"}], + ], + }, + 
"Object" + ), + ({"type": "regexp", "value": {"pattern": "foo", "flags": "g"}}, + "RegExp" + ), + ({ + "type": "set", + "value": [ + {"type": "string", "value": "foobar"}, + ], + }, + "Set" + ) + ], +) +async def test_local_value(bidi_session, top_context, argument, expected_type): + result = await bidi_session.script.call_function( + function_declaration=f"""(arg) => {{ + if (!(arg instanceof {expected_type})) {{ + const type = Object.prototype.toString.call(arg); + throw new Error( + "Argument type should be {expected_type}, but was " + type + ); + }} + return arg; + }}""", + arguments=[argument], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + recursive_compare(argument, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "setup_expression, function_declaration, expected", + [ + ( + "Symbol('foo')", + "(symbol) => symbol.toString()", + {"type": "string", "value": "Symbol(foo)"}, + ), + ("[1,2]", "(array) => array[0]", {"type": "number", "value": 1}), + ( + "new RegExp('foo')", + "(regexp) => regexp.source", + {"type": "string", "value": "foo"}, + ), + ( + "new Date(1654004849000)", + "(date) => date.toISOString()", + {"type": "string", "value": "2022-05-31T13:47:29.000Z"}, + ), + ( + "new Map([['foo', 'bar']])", + "(map) => map.get('foo')", + {"type": "string", "value": "bar"}, + ), + ( + "new Set(['foo'])", + "(set) => set.has('foo')", + {"type": "boolean", "value": True}, + ), + ( + "{const weakMap = new WeakMap(); weakMap.set(weakMap, 'foo')}", + "(weakMap)=> weakMap.get(weakMap)", + {"type": "string", "value": "foo"}, + ), + ( + "{const weakSet = new WeakSet(); weakSet.add(weakSet)}", + "(weakSet)=> weakSet.has(weakSet)", + {"type": "boolean", "value": True}, + ), + ( + "new Error('error message')", + "(error) => error.message", + {"type": "string", "value": "error message"}, + ), + ( + "new SyntaxError('syntax error message')", + "(error) => error.message", + {"type": "string", "value": "syntax error message"}, + ), + ( + "new Promise((resolve) => resolve(3))", + "(promise) => promise", + {"type": "number", "value": 3}, + ), + ( + "new Int8Array(2)", + "(int8Array) => int8Array.length", + {"type": "number", "value": 2}, + ), + ( + "new ArrayBuffer(8)", + "(arrayBuffer) => arrayBuffer.byteLength", + {"type": "number", "value": 8}, + ), + ("() => true", "(func) => func()", {"type": "boolean", "value": True}), + ( + "(function() {return false;})", + "(func) => func()", + {"type": "boolean", "value": False}, + ), + ( + "window.foo = 3; window", + "(window) => window.foo", + {"type": "number", "value": 3}, + ), + ( + "window.url = new URL('https://example.com'); window.url", + "(url) => url.hostname", + {"type": "string", "value": "example.com"}, + ), + ( + "({SOME_PROPERTY:'SOME_VALUE'})", + "(obj) => obj.SOME_PROPERTY", + {"type": "string", "value": "SOME_VALUE"}, + ), + ], +) +async def test_remote_reference_argument( + bidi_session, top_context, setup_expression, function_declaration, expected +): + remote_value_result = await bidi_session.script.evaluate( + expression=setup_expression, + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + remote_value_handle = remote_value_result.get("handle") + + assert isinstance(remote_value_handle, str) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + arguments=[{"handle": remote_value_handle}], + await_promise=True if remote_value_result["type"] == "promise" else False, + 
target=ContextTarget(top_context["context"]), + ) + + assert result == expected + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "value_fn, function_declaration", + [ + ( + lambda value: value, + "function(arg) { return arg === window.SOME_OBJECT; }", + ), + ( + lambda value: ({"type": "object", "value": [["nested", value]]}), + "function(arg) { return arg.nested === window.SOME_OBJECT; }", + ), + ( + lambda value: ({"type": "array", "value": [value]}), + "function(arg) { return arg[0] === window.SOME_OBJECT; }", + ), + ( + lambda value: ({"type": "map", "value": [["foobar", value]]}), + "function(arg) { return arg.get('foobar') === window.SOME_OBJECT; }", + ), + ( + lambda value: ({"type": "set", "value": [value]}), + "function(arg) { return arg.has(window.SOME_OBJECT); }", + ), + ], +) +async def test_remote_reference_deserialization( + bidi_session, top_context, call_function, evaluate, value_fn, function_declaration +): + remote_value = await evaluate( + "window.SOME_OBJECT = { SOME_PROPERTY: 'SOME_VALUE' }; window.SOME_OBJECT", + result_ownership="root", + ) + + # Check that a remote value can be successfully deserialized as an "argument" + # parameter and compared against the original object in the page. + result = await call_function( + function_declaration=function_declaration, + arguments=[value_fn(remote_value)], + ) + assert result == {"type": "boolean", "value": True} + + # Reload the page to cleanup the state + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=top_context["url"], wait="complete" + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "setup_expression, expected_node_type", + [ + ("document.querySelector('img')", 1), + ("document.querySelector('input#button').attributes[0]", 2), + ("document.querySelector('#with-text-node').childNodes[0]", 3), + ("""document.createProcessingInstruction("xml-stylesheet", "href='foo.css'")""", 7), + ("document.querySelector('#with-comment').childNodes[0]", 8), + ("document", 9), + ("document.doctype", 10), + ("document.createDocumentFragment()", 11), + ("document.querySelector('#custom-element').shadowRoot", 11), + ], + ids=[ + "element", + "attribute", + "text node", + "processing instruction", + "comment", + "document", + "doctype", + "document fragment", + "shadow root", + ] +) +async def test_remote_reference_node_argument( + bidi_session, get_test_page, top_context, setup_expression, expected_node_type +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + remote_reference = await bidi_session.script.evaluate( + expression=setup_expression, + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + result = await bidi_session.script.call_function( + function_declaration="(node) => node.nodeType", + arguments=[remote_reference], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + assert result == {"type": "number", "value": expected_node_type} + + +@pytest.mark.asyncio +async def test_remote_reference_node_cdata(bidi_session, inline, top_context): + xml_page = inline("""CDATA section: & ]]>.""", doctype="xml") + + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=xml_page, wait="complete" + ) + + remote_reference = await bidi_session.script.evaluate( + expression="document.querySelector('foo').childNodes[1]", + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + result = await 
bidi_session.script.call_function( + function_declaration="(node) => node.nodeType", + arguments=[remote_reference], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + assert result == {"type": "number", "value": 4} + + +@pytest.mark.asyncio +async def test_remote_reference_sharedId_precedence_over_handle( + bidi_session, get_test_page, top_context +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + remote_reference = await bidi_session.script.evaluate( + expression="document.querySelector('img')", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + + assert "handle" in remote_reference + # Invalidate shared reference to trigger a "no such node" error + remote_reference["sharedId"] = "foo" + + with pytest.raises(error.NoSuchNodeException): + await bidi_session.script.call_function( + function_declaration="(node) => node.nodeType", + arguments=[remote_reference], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, function_declaration, expected", + [ + ( + "document.getElementsByTagName('span')", + "(collection) => collection.item(0)", + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "children": [], + "localName": "span", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + } + ), + ( + "document.querySelectorAll('span')", + "(nodeList) => nodeList.item(0)", + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "children": [], + "localName": "span", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + } + ), + ], ids=[ + "htmlcollection", + "nodelist" + ] +) +async def test_remote_reference_dom_collection( + bidi_session, + inline, + top_context, + call_function, + expression, + function_declaration, + expected +): + page_url = inline("""
<span></span>
""") + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=page_url, wait="complete" + ) + + remote_value = await bidi_session.script.evaluate( + expression=expression, + result_ownership="root", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + # Check that a remote value can be successfully deserialized as an "argument" + # parameter and the first element be extracted. + result = await call_function( + function_declaration=function_declaration, + arguments=[remote_value], + serialization_options=SerializationOptions(max_dom_depth=1), + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "channel, expected_data", + [ + ( + {"type": "channel", "value": {"channel": "channel_name"}}, + { + "type": "object", + "value": [ + ["foo", {"type": "string", "value": "bar"}], + [ + "baz", + { + "type": "object", + "value": [["1", {"type": "number", "value": 2}]], + }, + ], + ], + }, + ), + ( + { + "type": "channel", + "value": { + "channel": "channel_name", + "serializationOptions": { + "maxObjectDepth": 0 + }, + }, + }, + {"type": "object"}, + ), + ( + { + "type": "channel", + "value": {"channel": "channel_name", "ownership": "root"}, + }, + { + "handle": any_string, + "type": "object", + "value": [ + ["foo", {"type": "string", "value": "bar"}], + [ + "baz", + { + "type": "object", + "value": [["1", {"type": "number", "value": 2}]], + }, + ], + ], + }, + ), + ], + ids=["default", "with serializationOptions", "with ownership"], +) +async def test_channel( + bidi_session, top_context, subscribe_events, wait_for_event, channel, expected_data +): + await subscribe_events(["script.message"]) + + on_script_message = wait_for_event("script.message") + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="""(channel) => channel({'foo': 'bar', 'baz': {'1': 2}})""", + arguments=[channel], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + event_data = await on_script_message + + recursive_compare( + { + "channel": "channel_name", + "data": expected_data, + "source": { + "realm": result["realm"], + "context": top_context["context"], + }, + }, + event_data, + ) + + +@pytest.mark.asyncio +async def test_channel_with_multiple_arguments( + bidi_session, top_context, subscribe_events, wait_for_event +): + await subscribe_events(["script.message"]) + + on_script_message = wait_for_event("script.message") + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="""(channel) => channel('will_be_send', 'will_be_ignored')""", + arguments=[{"type": "channel", "value": {"channel": "channel_name"}}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + event_data = await on_script_message + + recursive_compare( + { + "channel": "channel_name", + "data": {"type": "string", "value": "will_be_send"}, + "source": { + "realm": result["realm"], + "context": top_context["context"], + }, + }, + event_data, + ) + + +@pytest.mark.asyncio +async def test_two_channels( + bidi_session, + top_context, + subscribe_events, +): + await subscribe_events(["script.message"]) + + # Track all received script.message events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("script.message", on_event) + + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="""(channel_1, channel_2) 
=> { + channel_1('message_from_channel_1'); + channel_2('message_from_channel_2') + }""", + arguments=[ + {"type": "channel", "value": {"channel": "channel_name_1"}}, + {"type": "channel", "value": {"channel": "channel_name_2"}}, + ], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + # Wait for both events + wait = AsyncPoll(bidi_session, timeout=0.5) + await wait.until(lambda _: len(events) == 2) + + recursive_compare( + { + "channel": "channel_name_1", + "data": {"type": "string", "value": "message_from_channel_1"}, + "source": { + "realm": result["realm"], + "context": top_context["context"], + }, + }, + events[0], + ) + + recursive_compare( + { + "channel": "channel_name_2", + "data": {"type": "string", "value": "message_from_channel_2"}, + "source": { + "realm": result["realm"], + "context": top_context["context"], + }, + }, + events[1], + ) + + remove_listener() + + +@pytest.mark.asyncio +async def test_channel_and_nonchannel_arguments( + bidi_session, + top_context, + wait_for_event, + subscribe_events, +): + await subscribe_events(["script.message"]) + + on_script_message = wait_for_event("script.message") + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="""(string, channel) => { + channel(string); + }""", + arguments=[ + {"type": "string", "value": "foo"}, + {"type": "channel", "value": {"channel": "channel_name"}}, + ], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + event_data = await on_script_message + + recursive_compare( + { + "channel": "channel_name", + "data": {"type": "string", "value": "foo"}, + "source": { + "realm": result["realm"], + "context": top_context["context"], + }, + }, + event_data, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/await_promise.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/await_promise.py new file mode 100644 index 0000000000..f31d35cd80 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/await_promise.py @@ -0,0 +1,48 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException + +from ... import any_int, any_string, recursive_compare +from .. 
import any_stack_trace + + +@pytest.mark.asyncio +@pytest.mark.parametrize("await_promise", [True, False]) +async def test_await_promise_delayed(bidi_session, top_context, await_promise): + result = await bidi_session.script.call_function( + function_declaration=""" + async function() {{ + await new Promise(r => setTimeout(() => r(), 0)); + return "SOME_DELAYED_RESULT"; + }} + """, + await_promise=await_promise, + target=ContextTarget(top_context["context"]), + ) + + if await_promise: + assert result == { + "type": "string", + "value": "SOME_DELAYED_RESULT"} + else: + recursive_compare({ + "type": "promise"}, + result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("await_promise", [True, False]) +async def test_await_promise_async_arrow(bidi_session, top_context, await_promise): + result = await bidi_session.script.call_function( + function_declaration="async ()=>{return 'SOME_VALUE'}", + await_promise=await_promise, + target=ContextTarget(top_context["context"])) + + if await_promise: + assert result == { + "type": "string", + "value": "SOME_VALUE"} + else: + recursive_compare({ + "type": "promise"}, + result) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/exception_details.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/exception_details.py new file mode 100644 index 0000000000..97f38d5aaa --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/exception_details.py @@ -0,0 +1,211 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException + +from ... import any_int, any_string, recursive_compare +from .. import any_stack_trace + + +@pytest.mark.asyncio +async def test_invalid_function(bidi_session, top_context): + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.call_function( + function_declaration="))) !!@@## some invalid JS script (((", + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": {"type": "error"}, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + exception.value.result, + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("await_promise", [True, False]) +@pytest.mark.parametrize( + "expression, expected", + [ + ("undefined", {"type": "undefined"}), + ("null", {"type": "null"}), + ("'foobar'", {"type": "string", "value": "foobar"}), + ("'2'", {"type": "string", "value": "2"}), + ("Number.NaN", {"type": "number", "value": "NaN"}), + ("-0", {"type": "number", "value": "-0"}), + ("Infinity", {"type": "number", "value": "Infinity"}), + ("-Infinity", {"type": "number", "value": "-Infinity"}), + ("3", {"type": "number", "value": 3}), + ("1.4", {"type": "number", "value": 1.4}), + ("true", {"type": "boolean", "value": True}), + ("false", {"type": "boolean", "value": False}), + ("42n", {"type": "bigint", "value": "42"}), + ("(Symbol('foo'))", {"type": "symbol", },), + ( + "[1, 'foo', true, new RegExp(/foo/g), [1]]", + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + { + "type": "regexp", + "value": { + "pattern": "foo", + "flags": "g", + }, + }, + {"type": "array"}, + ], + }, + ), + ( + "({'foo': {'bar': 'baz'}, 'qux': 'quux'})", + { + "type": "object", + "value": [ + ["foo", {"type": "object"}], + ["qux", {"type": "string", 
"value": "quux"}], + ], + }, + ), + ("(()=>{})", {"type": "function", },), + ("(function(){})", {"type": "function", },), + ("(async ()=>{})", {"type": "function", },), + ("(async function(){})", {"type": "function", },), + ( + "new RegExp(/foo/g)", + { + "type": "regexp", + "value": { + "pattern": "foo", + "flags": "g", + }, + }, + ), + ( + "new Date(1654004849000)", + { + "type": "date", + "value": "2022-05-31T13:47:29.000Z", + }, + ), + ( + "new Map([[1, 2], ['foo', 'bar'], [true, false], ['baz', [1]]])", + { + "type": "map", + "value": [ + [ + {"type": "number", "value": 1}, + {"type": "number", "value": 2}, + ], + ["foo", {"type": "string", "value": "bar"}], + [ + {"type": "boolean", "value": True}, + {"type": "boolean", "value": False}, + ], + ["baz", {"type": "array"}], + ], + }, + ), + ( + "new Set([1, 'foo', true, [1], new Map([[1,2]])])", + { + "type": "set", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + {"type": "array"}, + {"type": "map"}, + ], + }, + ), + ("new WeakMap()", {"type": "weakmap", },), + ("new WeakSet()", {"type": "weakset", },), + ("new Error('SOME_ERROR_TEXT')", {"type": "error"},), + # TODO(sadym): add `iterator` test. + # TODO(sadym): add `generator` test. + # TODO(sadym): add `proxy` test. + ("Promise.resolve()", {"type": "promise", },), + ("new Int32Array()", {"type": "typedarray", },), + ("new ArrayBuffer()", {"type": "arraybuffer", },), + ( + "document.createElement('div')", + { + "type": "node", + 'value': { + 'attributes': {}, + 'childNodeCount': 0, + 'localName': 'div', + 'namespaceURI': 'http://www.w3.org/1999/xhtml', + 'nodeType': 1, + } + }, + ), + ("window", {"type": "window", },), + ], +) +@pytest.mark.asyncio +async def test_exception_details(bidi_session, top_context, await_promise, expression, expected): + function_declaration = f"()=>{{ throw {expression} }}" + if await_promise: + function_declaration = "async" + function_declaration + + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.call_function( + function_declaration=function_declaration, + await_promise=await_promise, + target=ContextTarget(top_context["context"]), + ) + + recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": expected, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + exception.value.result, + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("chained", [True, False]) +async def test_rejected_promise(bidi_session, top_context, chained): + if chained: + function_declaration = "() => Promise.reject('error').then(() => { })" + else: + function_declaration = "() => Promise.reject('error')" + + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.call_function( + function_declaration=function_declaration, + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": {"type": "string", "value": "error"}, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + exception.value.result, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/function_declaration.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/function_declaration.py new file mode 100644 index 0000000000..292e6da53b --- /dev/null +++ 
b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/function_declaration.py @@ -0,0 +1,14 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget + + +@pytest.mark.asyncio +async def test_arrow_function(bidi_session, top_context): + result = await bidi_session.script.call_function( + function_declaration="()=>{return 1+2;}", + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + assert result == {"type": "number", "value": 3} diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/internal_id.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/internal_id.py new file mode 100644 index 0000000000..562084203a --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/internal_id.py @@ -0,0 +1,67 @@ +import pytest + +from ... import recursive_compare, any_string + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "return_structure, result_type", + [ + ("[data, data]", "array"), + ("new Map([['foo', data],['bar', data]])", "map"), + ("({ 'foo': data, 'bar': data })", "object"), + ], +) +@pytest.mark.parametrize( + "expression, type", + [ + ("[1]", "array"), + ("new Map([[true, false]])", "map"), + ("new Set(['baz'])", "set"), + ("{ baz: 'qux' }", "object"), + ], +) +async def test_remote_values_with_internal_id( + call_function, return_structure, result_type, expression, type +): + result = await call_function( + f"() => {{ const data = {expression}; return {return_structure}; }}" + ) + result_value = result["value"] + + assert len(result_value) == 2 + + if result_type == "array": + value = [ + {"type": type, "internalId": any_string}, + {"type": type, "internalId": any_string}, + ] + internalId1 = result_value[0]["internalId"] + internalId2 = result_value[1]["internalId"] + else: + value = [ + ["foo", {"type": type, "internalId": any_string}], + ["bar", {"type": type, "internalId": any_string}], + ] + internalId1 = result_value[0][1]["internalId"] + internalId2 = result_value[1][1]["internalId"] + + # Make sure that the same duplicated objects have the same internal ids + assert internalId1 == internalId2 + + recursive_compare(value, result_value) + + +@pytest.mark.asyncio +async def test_different_remote_values_have_unique_internal_ids(call_function): + result = await call_function( + "() => { const obj1 = [1]; const obj2 = {'foo': 'bar'}; return [obj1, obj2, obj1, obj2]; }" + ) + + assert len(result["value"]) == 4 + + internalId1 = result["value"][0]["internalId"] + internalId2 = result["value"][1]["internalId"] + + # Make sure that different duplicated objects have different internal ids + assert internalId1 != internalId2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid.py new file mode 100644 index 0000000000..800c4e26a6 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid.py @@ -0,0 +1,423 @@ +import pytest +import webdriver.bidi.error as error + +from webdriver.bidi.modules.script import ContextTarget, RealmTarget, SerializationOptions + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("target", [None, False, "foo", 42, {}, []]) +async def test_params_target_invalid_type(bidi_session, target): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + await_promise=False, + 
target=target) + + +@pytest.mark.parametrize("context", [None, False, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + await_promise=False, + target=ContextTarget(context)) + + +@pytest.mark.parametrize("sandbox", [False, 42, {}, []]) +async def test_params_sandbox_invalid_type(bidi_session, top_context, sandbox): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + await_promise=False, + target=ContextTarget(top_context["context"], + sandbox)) + + +async def test_params_context_unknown(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + await_promise=False, + target=ContextTarget("_UNKNOWN_")) + + +@pytest.mark.parametrize("realm", [None, False, 42, {}, []]) +async def test_params_realm_invalid_type(bidi_session, realm): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + await_promise=False, + target=RealmTarget(realm)) + + +async def test_params_realm_unknown(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + await_promise=False, + target=RealmTarget("_UNKNOWN_")) + + +@pytest.mark.parametrize("function_declaration", [None, False, 42, {}, []]) +async def test_params_function_declaration_invalid_type(bidi_session, top_context, + function_declaration): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration=function_declaration, + await_promise=False, + target=ContextTarget(top_context["context"])) + + +@pytest.mark.parametrize("this", [False, "SOME_STRING", 42, {}, []]) +async def test_params_this_invalid_type(bidi_session, top_context, + this): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + this=this, + await_promise=False, + target=ContextTarget(top_context["context"])) + + +@pytest.mark.parametrize("arguments", [False, "SOME_STRING", 42, {}]) +async def test_params_arguments_invalid_type(bidi_session, top_context, + arguments): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=arguments, + await_promise=False, + target=ContextTarget(top_context["context"])) + + +@pytest.mark.parametrize("argument", [False, "SOME_STRING", 42, {}, []]) +async def test_params_arguments_entry_invalid_type(bidi_session, top_context, + argument): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[argument], + await_promise=False, + target=ContextTarget(top_context["context"])) + + +@pytest.mark.parametrize("value", [None, False, "_UNKNOWN_", 42, []]) +async def test_params_arguments_channel_value_invalid_type( + bidi_session, top_context, value +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[{"type": "channel", "value": value}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + 
+@pytest.mark.parametrize("channel", [None, False, 42, [], {}]) +async def test_params_arguments_channel_id_invalid_type( + bidi_session, top_context, channel +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[{"type": "channel", "value": {"channel": channel}}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +@pytest.mark.parametrize("ownership", [False, 42, {}, []]) +async def test_params_arguments_channel_ownership_invalid_type( + bidi_session, top_context, ownership +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[{"type": "channel", "value": {"ownership": ownership}}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +async def test_params_arguments_channel_ownership_invalid_value( + bidi_session, top_context +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[{"type": "channel", "value": {"ownership": "_UNKNOWN_"}}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +@pytest.mark.parametrize("serialization_options", [False, "_UNKNOWN_", 42, []]) +async def test_params_arguments_channel_serialization_options_invalid_type( + bidi_session, top_context, serialization_options +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[ + { + "type": "channel", + "value": {"serializationOptions": serialization_options}, + } + ], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +@pytest.mark.parametrize("max_dom_depth", [False, "_UNKNOWN_", {}, []]) +async def test_params_arguments_channel_max_dom_depth_invalid_type( + bidi_session, top_context, max_dom_depth +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[ + { + "type": "channel", + "value": { + "serializationOptions": {"maxDomDepth": max_dom_depth} + }, + } + ], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +async def test_params_arguments_channel_max_dom_depth_invalid_value( + bidi_session, top_context +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[ + { + "type": "channel", + "value": { + "serializationOptions": {"maxDomDepth": -1} + }, + } + ], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +@pytest.mark.parametrize("max_object_depth", [False, "_UNKNOWN_", {}, []]) +async def test_params_arguments_channel_max_object_depth_invalid_type( + bidi_session, top_context, max_object_depth +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[ + { + "type": "channel", + "value": { + "serializationOptions": {"maxObjectDepth": max_object_depth} + }, + } + ], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +async def test_params_arguments_channel_max_object_depth_invalid_value( + bidi_session, top_context +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => 
arg", + arguments=[ + { + "type": "channel", + "value": {"serializationOptions": {"maxObjectDepth": -1}}, + } + ], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +@pytest.mark.parametrize("include_shadow_tree", [False, 42, {}, []]) +async def test_params_arguments_channel_include_shadow_tree_invalid_type( + bidi_session, top_context, include_shadow_tree +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[ + { + "type": "channel", + "value": { + "serializationOptions": { + "includeShadowTree": include_shadow_tree + } + }, + } + ], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +async def test_params_arguments_channel_include_shadow_tree_invalid_value( + bidi_session, top_context +): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[ + { + "type": "channel", + "value": { + "serializationOptions": {"includeShadowTree": "_UNKNOWN_"} + }, + } + ], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + +@pytest.mark.parametrize("value", [None, False, 42, {}, []]) +async def test_params_arguments_handle_invalid_type( + bidi_session, top_context, value +): + serialized_value = { + "handle": value, + } + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[serialized_value], + await_promise=False, + target=ContextTarget(top_context["context"])) + + +async def test_params_arguments_handle_unknown_value( + bidi_session, top_context +): + serialized_value = { + "handle": "foo", + } + + with pytest.raises(error.NoSuchHandleException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[serialized_value], + await_promise=False, + target=ContextTarget(top_context["context"])) + + +@pytest.mark.parametrize("value", [None, False, 42, {}, []]) +async def test_params_arguments_sharedId_invalid_type( + bidi_session, top_context, value +): + serialized_value = { + "sharedId": value, + } + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + arguments=[serialized_value], + await_promise=False, + target=ContextTarget(top_context["context"])) + + +@pytest.mark.parametrize("await_promise", [None, "False", 0, 42, {}, []]) +async def test_params_await_promise_invalid_type(bidi_session, top_context, + await_promise): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + await_promise=await_promise, + target=ContextTarget(top_context["context"])) + + +@pytest.mark.parametrize("result_ownership", [False, "_UNKNOWN_", 42, {}, []]) +async def test_params_result_ownership_invalid_value(bidi_session, top_context, + result_ownership): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + await_promise=False, + target=ContextTarget(top_context["context"]), + result_ownership=result_ownership) + + +@pytest.mark.parametrize("serialization_options", [False, "_UNKNOWN_", 42, []]) +async def test_params_serialization_options_invalid_type(bidi_session, top_context, serialization_options): + with pytest.raises(error.InvalidArgumentException): + await 
bidi_session.script.call_function( + function_declaration="(arg) => arg", + serialization_options=serialization_options, + target=ContextTarget(top_context["context"]), + await_promise=True) + + +@pytest.mark.parametrize("max_dom_depth", [False, "_UNKNOWN_", {}, []]) +async def test_params_max_dom_depth_invalid_type(bidi_session, top_context, max_dom_depth): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + serialization_options=SerializationOptions(max_dom_depth=max_dom_depth), + target=ContextTarget(top_context["context"]), + await_promise=True) + + +async def test_params_max_dom_depth_invalid_value(bidi_session, top_context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + serialization_options=SerializationOptions(max_dom_depth=-1), + target=ContextTarget(top_context["context"]), + await_promise=True) + + +@pytest.mark.parametrize("max_object_depth", [False, "_UNKNOWN_", {}, []]) +async def test_params_max_object_depth_invalid_type(bidi_session, top_context, max_object_depth): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + serialization_options=SerializationOptions(max_object_depth=max_object_depth), + target=ContextTarget(top_context["context"]), + await_promise=True) + + +async def test_params_max_object_depth_invalid_value(bidi_session, top_context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + serialization_options=SerializationOptions(max_object_depth=-1), + target=ContextTarget(top_context["context"]), + await_promise=True) + + +@pytest.mark.parametrize("include_shadow_tree", [False, 42, {}, []]) +async def test_params_include_shadow_tree_invalid_type(bidi_session, top_context, include_shadow_tree): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + serialization_options=SerializationOptions(include_shadow_tree=include_shadow_tree), + target=ContextTarget(top_context["context"]), + await_promise=True) + + +async def test_params_include_shadow_tree_invalid_value(bidi_session, top_context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="(arg) => arg", + serialization_options=SerializationOptions(include_shadow_tree="foo"), + target=ContextTarget(top_context["context"]), + await_promise=True) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid_tentative.py new file mode 100644 index 0000000000..2726178e47 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid_tentative.py @@ -0,0 +1,38 @@ +import pytest +import webdriver.bidi.error as error + +from webdriver.bidi.modules.script import ContextTarget + +pytestmark = pytest.mark.asyncio + + +# The following tests are marked as tentative until +# https://github.com/w3c/webdriver-bidi/issues/274 is resolved.
+async def test_params_target_invalid_value(bidi_session, top_context): + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => 1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=True, + ) + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="() => 1 + 2", + target={"context": top_context["context"], "realm": result["realm"]}, + await_promise=True, + ) + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="() => 1 + 2", + target={"sandbox": "foo", "realm": result["realm"]}, + await_promise=True, + ) + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="() => 1 + 2", + target={"sandbox": "bar"}, + await_promise=True, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/realm.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/realm.py new file mode 100644 index 0000000000..a8830230ee --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/realm.py @@ -0,0 +1,71 @@ +import pytest + +from webdriver.bidi.modules.script import RealmTarget +from ... import recursive_compare + + +@pytest.mark.asyncio +async def test_target_realm(bidi_session, default_realm): + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => { window.foo = 3; }", + target=RealmTarget(default_realm), + await_promise=True, + ) + + recursive_compare({"realm": default_realm, "result": {"type": "undefined"}}, result) + + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => window.foo", + target=RealmTarget(default_realm), + await_promise=True, + ) + + recursive_compare( + {"realm": default_realm, "result": {"type": "number", "value": 3}}, result + ) + + +@pytest.mark.asyncio +async def test_different_target_realm(bidi_session): + await bidi_session.browsing_context.create(type_hint="tab") + + realms = await bidi_session.script.get_realms() + first_tab_default_realm = realms[0]["realm"] + second_tab_default_realm = realms[1]["realm"] + + assert first_tab_default_realm != second_tab_default_realm + + await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => { window.foo = 3; }", + target=RealmTarget(first_tab_default_realm), + await_promise=True, + ) + await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => { window.foo = 5; }", + target=RealmTarget(second_tab_default_realm), + await_promise=True, + ) + + top_context_result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => window.foo", + target=RealmTarget(first_tab_default_realm), + await_promise=True, + ) + recursive_compare( + {"realm": first_tab_default_realm, "result": {"type": "number", "value": 3}}, top_context_result + ) + + new_context_result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => window.foo", + target=RealmTarget(second_tab_default_realm), + await_promise=True, + ) + recursive_compare( + {"realm": second_tab_default_realm, "result": {"type": "number", "value": 5}}, new_context_result + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result.py new file mode 
100644 index 0000000000..99d62780fc --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result.py @@ -0,0 +1,161 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget, SerializationOptions + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("await_promise", [True, False]) +@pytest.mark.parametrize( + "expression, expected", + [ + ("undefined", {"type": "undefined"}), + ("null", {"type": "null"}), + ("'foobar'", {"type": "string", "value": "foobar"}), + ("'2'", {"type": "string", "value": "2"}), + ("Number.NaN", {"type": "number", "value": "NaN"}), + ("-0", {"type": "number", "value": "-0"}), + ("Infinity", {"type": "number", "value": "Infinity"}), + ("-Infinity", {"type": "number", "value": "-Infinity"}), + ("3", {"type": "number", "value": 3}), + ("1.4", {"type": "number", "value": 1.4}), + ("true", {"type": "boolean", "value": True}), + ("false", {"type": "boolean", "value": False}), + ("42n", {"type": "bigint", "value": "42"}), + ], +) +async def test_primitive_values(bidi_session, top_context, await_promise, expression, expected): + function_declaration = f"()=>{expression}" + if await_promise: + function_declaration = "async" + function_declaration + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + await_promise=await_promise, + target=ContextTarget(top_context["context"]), + ) + + assert result == expected + + +@pytest.mark.parametrize("await_promise", [True, False]) +@pytest.mark.parametrize( + "expression, expected", + [ + ("(Symbol('foo'))", {"type": "symbol"}), + ( + "[1, 'foo', true, new RegExp(/foo/g), [1]]", + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + { + "type": "regexp", + "value": { + "pattern": "foo", + "flags": "g", + }, + }, + {"type": "array"}, + ], + }, + ), + ( + "({'foo': {'bar': 'baz'}, 'qux': 'quux'})", + { + "type": "object", + "value": [ + ["foo", {"type": "object"}], + ["qux", {"type": "string", "value": "quux"}], + ], + }, + ), + ("(()=>{})", {"type": "function"}), + ("(function(){})", {"type": "function"}), + ("(async ()=>{})", {"type": "function"}), + ("(async function(){})", {"type": "function"}), + ( + "new RegExp(/foo/g)", + {"type": "regexp", "value": {"pattern": "foo", "flags": "g"}}, + ), + ( + "new Date(1654004849000)", + {"type": "date", "value": "2022-05-31T13:47:29.000Z"}, + ), + ( + "new Map([[1, 2], ['foo', 'bar'], [true, false], ['baz', [1]]])", + { + "type": "map", + "value": [ + [ + {"type": "number", "value": 1}, + {"type": "number", "value": 2}, + ], + ["foo", {"type": "string", "value": "bar"}], + [ + {"type": "boolean", "value": True}, + {"type": "boolean", "value": False}, + ], + ["baz", {"type": "array"}], + ], + }, + ), + ( + "new Set([1, 'foo', true, [1], new Map([[1,2]])])", + { + "type": "set", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + {"type": "array"}, + {"type": "map"}, + ], + }, + ), + ("new WeakMap()", {"type": "weakmap"}), + ("new WeakSet()", {"type": "weakset"}), + ("new Error('SOME_ERROR_TEXT')", {"type": "error"}), + # TODO(sadym): add `iterator` test. + # TODO(sadym): add `generator` test. + # TODO(sadym): add `proxy` test. 
+ ("new Int32Array()", {"type": "typedarray"}), + ("new ArrayBuffer()", {"type": "arraybuffer"}), + ("window", {"type": "window"}), + ("new URL('https://example.com')", {"type": "object", },), + ], +) +async def test_remote_values(bidi_session, top_context, await_promise, expression, expected): + function_declaration = f"()=>{expression}" + if await_promise: + function_declaration = "async" + function_declaration + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + await_promise=await_promise, + target=ContextTarget(top_context["context"]), + serialization_options=SerializationOptions(max_object_depth=1), + ) + + assert result == expected + + +async def test_remote_value_promise_await(bidi_session, top_context): + result = await bidi_session.script.call_function( + function_declaration="()=>Promise.resolve(42)", + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + assert result == {"type": "number", "value": 42} + + +async def test_remote_value_promise_no_await(bidi_session, top_context): + result = await bidi_session.script.call_function( + function_declaration="()=>Promise.resolve(42)", + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + assert result == {"type": "promise"} diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_node.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_node.py new file mode 100644 index 0000000000..378f6bed92 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_node.py @@ -0,0 +1,656 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget, SerializationOptions + +from ... import any_string, recursive_compare + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( # basic + """ + () => document.querySelector("br") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "children": [], + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + }, + }, + ), + ( # attributes + """ + () => document.querySelector("svg") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": { + "svg:foo": "bar", + }, + "childNodeCount": 0, + "children": [], + "localName": "svg", + "namespaceURI": "http://www.w3.org/2000/svg", + "nodeType": 1, + }, + }, + ), + ( # all children including non-element nodes + """ + () => document.querySelector("#with-text-node") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-text-node"}, + "childNodeCount": 1, + "children": [{ + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 3, + "nodeValue": "Lorem", + } + }], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + }, + }, + ), + ( # children limited due to max depth + """ + () => document.querySelector("#with-children") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + "childNodeCount": 2, + "children": [{ + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 1, + "localName": "p", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }, { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "br", + 
"namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + }, + }, + ), + ( # not connected + """ + () => document.createElement("div") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "children": [], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + }, + }, + ), + ], ids=[ + "basic", + "attributes", + "all_children", + "children_max_depth", + "not_connected", + ] +) +async def test_element_node( + bidi_session, get_test_page, top_context, function_declaration, expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(top_context["context"]), + await_promise=False, + serialization_options=SerializationOptions(max_dom_depth=1), + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( + """ + () => document.querySelector("input#button").attributes[0] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "localName": "id", + "namespaceURI": None, + "nodeType": 2, + "nodeValue": "button", + }, + }, + ), ( + """ + () => document.querySelector("svg").attributes[0] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "localName": "foo", + "namespaceURI": "http://www.w3.org/2000/svg", + "nodeType": 2, + "nodeValue": "bar", + }, + }, + ), + ], ids=[ + "basic", + "namespace", + ] +) +async def test_attribute_node( + bidi_session, get_test_page, top_context, function_declaration, expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( + """ + () => document.querySelector("#with-text-node").childNodes[0] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 3, + "nodeValue": "Lorem", + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_text_node( + bidi_session, get_test_page, top_context, function_declaration, expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( + """ + () => document.querySelector("foo").childNodes[1] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 4, + "nodeValue": " < > & ", + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_cdata_node(bidi_session, inline, new_tab, function_declaration, expected): + xml_page = inline("""CDATA section: & ]]>.""", doctype="xml") + + await bidi_session.browsing_context.navigate( + 
context=new_tab['context'], url=xml_page, wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(new_tab["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( + """ + () => document.createProcessingInstruction("xml-stylesheet", "href='foo.css'") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 7, + "nodeValue": "href='foo.css'", + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_processing_instruction_node( + bidi_session, inline, new_tab, function_declaration, expected +): + xml_page = inline("""<foo></foo>""", doctype="xml") + + await bidi_session.browsing_context.navigate( + context=new_tab['context'], url=xml_page, wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(new_tab["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( + """ + () => document.querySelector("#with-comment").childNodes[0] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 8, + "nodeValue": " Comment ", + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_comment_node( + bidi_session, get_test_page, top_context, function_declaration, expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( + """ + () => document + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 2, + "children": [{ + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 10 + } + }, { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 2, + "localName": "html", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }], + "nodeType": 9 + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_document_node( + bidi_session, get_test_page, top_context, function_declaration, expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(top_context["context"]), + await_promise=False, + serialization_options=SerializationOptions(max_dom_depth=1), + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( + """ + () => document.doctype + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 10, + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_doctype_node( + bidi_session, get_test_page, top_context, function_declaration, expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await
bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( + """ + () => document.querySelector("#custom-element").shadowRoot + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 1, + "children": [{ + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "in-shadow-dom"}, + "childNodeCount": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }], + "nodeType": 11 + } + } + ), + ( + """ + () => document.createDocumentFragment() + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "children": [], + "nodeType": 11, + } + } + ), + ], ids=[ + "shadow root", + "not connected", + ] +) +async def test_document_fragment_node( + bidi_session, get_test_page, top_context, function_declaration, expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(top_context["context"]), + await_promise=False, + serialization_options=SerializationOptions(max_dom_depth=1), + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +async def test_node_within_object(bidi_session, get_test_page, top_context): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration="""() => ({"elem": document.querySelector("img")})""", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + expected = { + "type": "object", + "value": [ + ["elem", { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "img", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }] + ] + } + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "function_declaration, expected", + [ + ( + "() => document.getElementsByTagName('img')", + { + "type": "htmlcollection", + "value": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "img", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }, + ] + } + ), + ( + "() => document.querySelectorAll('img')", + { + "type": "nodelist", + "value": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "img", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }, + ] + } + ), + ], ids=[ + "htmlcollection", + "nodelist" + ] +) +async def test_node_within_dom_collection( + bidi_session, + get_test_page, + top_context, + function_declaration, + expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + target=ContextTarget(top_context["context"]), + await_promise=False, + serialization_options=SerializationOptions(max_dom_depth=1), + ) + + recursive_compare(expected, result) + + 
+@pytest.mark.parametrize("shadow_root_mode", ["open", "closed"]) +@pytest.mark.asyncio +async def test_custom_element_with_shadow_root( + bidi_session, get_test_page, top_context, shadow_root_mode +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=get_test_page(shadow_root_mode=shadow_root_mode), + wait="complete", + ) + + result = await bidi_session.script.call_function( + function_declaration="""() => document.querySelector("#custom-element")""", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare({ + "type": "node", + "sharedId": any_string, + "value": { + "attributes": { + "id": "custom-element", + }, + "childNodeCount": 0, + "localName": "custom-element", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": { + "sharedId": any_string, + "type": "node", + "value": { + "childNodeCount": 1, + "mode": shadow_root_mode, + "nodeType": 11, + } + }, + } + }, result) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_ownership.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_ownership.py new file mode 100644 index 0000000000..84b8f776b4 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_ownership.py @@ -0,0 +1,60 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException +from .. import assert_handle + + +@pytest.mark.asyncio +@pytest.mark.parametrize("result_ownership, should_contain_handle", + [("root", True), ("none", False), (None, False)]) +async def test_throw_exception(bidi_session, top_context, result_ownership, should_contain_handle): + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.call_function( + function_declaration='()=>{throw {a:1}}', + await_promise=False, + result_ownership=result_ownership, + target=ContextTarget(top_context["context"])) + + assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("result_ownership, should_contain_handle", + [("root", True), ("none", False), (None, False)]) +async def test_invalid_script(bidi_session, top_context, result_ownership, should_contain_handle): + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.call_function( + function_declaration="))) !!@@## some invalid JS script (((", + await_promise=False, + result_ownership=result_ownership, + target=ContextTarget(top_context["context"])) + + assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("result_ownership, should_contain_handle", + [("root", True), ("none", False), (None, False)]) +async def test_rejected_promise(bidi_session, top_context, result_ownership, should_contain_handle): + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.call_function( + function_declaration="()=>{return Promise.reject({a:1})}", + await_promise=True, + result_ownership=result_ownership, + target=ContextTarget(top_context["context"])) + + assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("await_promise", [True, False]) +@pytest.mark.parametrize("result_ownership, should_contain_handle", + [("root", True), ("none", False), (None, 
False)]) +async def test_return_value(bidi_session, top_context, await_promise, result_ownership, should_contain_handle): + result = await bidi_session.script.call_function( + function_declaration="async function(){return {a: {b:1}}}", + await_promise=await_promise, + result_ownership=result_ownership, + target=ContextTarget(top_context["context"])) + + assert_handle(result, should_contain_handle) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/sandbox.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/sandbox.py new file mode 100644 index 0000000000..382ede3c78 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/sandbox.py @@ -0,0 +1,239 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget, RealmTarget, ScriptEvaluateResultException + +from ... import any_int, any_string, recursive_compare +from .. import any_stack_trace + + +@pytest.mark.asyncio +async def test_sandbox(bidi_session, new_tab): + # Make changes in window + await bidi_session.script.call_function( + function_declaration="() => { window.foo = 1; }", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + + # Check that changes are not present in sandbox + result_in_sandbox = await bidi_session.script.call_function( + function_declaration="() => window.foo", + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=True, + ) + assert result_in_sandbox == {"type": "undefined"} + + # Make changes in sandbox + await bidi_session.script.call_function( + function_declaration="() => { window.bar = 2; }", + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=True, + ) + + # Make sure that changes are present in sandbox + result_in_sandbox = await bidi_session.script.call_function( + function_declaration="() => window.bar", + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=True, + ) + assert result_in_sandbox == {"type": "number", "value": 2} + + # Make sure that changes didn't leak from sandbox + result_in_window = await bidi_session.script.call_function( + function_declaration="() => window.bar", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + assert result_in_window == {"type": "undefined"} + + +@pytest.mark.asyncio +async def test_sandbox_with_empty_name(bidi_session, new_tab): + # An empty string as a `sandbox` means the default realm should be used. + await bidi_session.script.call_function( + function_declaration="() => window.foo = 'bar'", + target=ContextTarget(new_tab["context"], ""), + await_promise=True, + ) + + # Make sure that we can find the sandbox with the empty name. + result = await bidi_session.script.call_function( + function_declaration="() => window.foo", + target=ContextTarget(new_tab["context"], ""), + await_promise=True, + ) + assert result == {"type": "string", "value": "bar"} + + # Make sure that we can find the value in the default realm. 
+ result = await bidi_session.script.call_function( + function_declaration="() => window.foo", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + assert result == {"type": "string", "value": "bar"} + + +@pytest.mark.asyncio +async def test_switch_sandboxes(bidi_session, new_tab): + # Test that sandboxes are retained when switching between them + await bidi_session.script.call_function( + function_declaration="() => { window.foo = 1; }", + target=ContextTarget(new_tab["context"], "sandbox_1"), + await_promise=True, + ) + await bidi_session.script.call_function( + function_declaration="() => { window.foo = 2; }", + target=ContextTarget(new_tab["context"], "sandbox_2"), + await_promise=True, + ) + + result_in_sandbox_1 = await bidi_session.script.call_function( + function_declaration="() => window.foo", + target=ContextTarget(new_tab["context"], "sandbox_1"), + await_promise=True, + ) + assert result_in_sandbox_1 == {"type": "number", "value": 1} + + result_in_sandbox_2 = await bidi_session.script.call_function( + function_declaration="() => window.foo", + target=ContextTarget(new_tab["context"], "sandbox_2"), + await_promise=True, + ) + assert result_in_sandbox_2 == {"type": "number", "value": 2} + + +@pytest.mark.asyncio +async def test_sandbox_with_side_effects(bidi_session, new_tab): + # Make sure changing the node in sandbox will affect the other sandbox as well + await bidi_session.script.call_function( + function_declaration="() => document.querySelector('body').textContent = 'foo'", + target=ContextTarget(new_tab["context"], "sandbox_1"), + await_promise=True, + ) + expected_value = {"type": "string", "value": "foo"} + + result_in_sandbox_1 = await bidi_session.script.call_function( + function_declaration="() => document.querySelector('body').textContent", + target=ContextTarget(new_tab["context"], "sandbox_1"), + await_promise=True, + ) + assert result_in_sandbox_1 == expected_value + + result_in_sandbox_2 = await bidi_session.script.call_function( + function_declaration="() => document.querySelector('body').textContent", + target=ContextTarget(new_tab["context"], "sandbox_2"), + await_promise=True, + ) + assert result_in_sandbox_2 == expected_value + + +@pytest.mark.asyncio +async def test_sandbox_returns_same_node(bidi_session, new_tab): + node = await bidi_session.script.call_function( + function_declaration="() => document.querySelector('body')", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + recursive_compare({"type": "node", "sharedId": any_string}, node) + + node_sandbox = await bidi_session.script.call_function( + function_declaration="() => document.querySelector('body')", + target=ContextTarget(new_tab["context"], sandbox="sandbox_1"), + await_promise=True, + ) + assert node_sandbox == node + + +@pytest.mark.asyncio +async def test_arguments(bidi_session, new_tab): + argument = { + "type": "set", + "value": [ + {"type": "string", "value": "foobar"}, + ], + } + + result = await bidi_session.script.call_function( + function_declaration="""(arg) => { + if(! 
(arg instanceof Set)) + throw Error("Argument type should be Set, but was "+ + Object.prototype.toString.call(arg)); + return arg; + }""", + arguments=[argument], + await_promise=False, + target=ContextTarget(new_tab["context"], "sandbox"), + ) + recursive_compare(argument, result) + + +@pytest.mark.asyncio +async def test_arguments_uses_same_node_in_sandbox(bidi_session, new_tab): + node = await bidi_session.script.call_function( + function_declaration="() => document.querySelector('body')", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + recursive_compare({"type": "node", "sharedId": any_string}, node) + + result = await bidi_session.script.call_function( + function_declaration="""(node) => node.localName""", + arguments=[node], + await_promise=False, + target=ContextTarget(new_tab["context"], "sandbox"), + ) + assert result == {"type": "string", "value": "body"} + + +@pytest.mark.asyncio +@pytest.mark.parametrize("await_promise", [True, False]) +async def test_exception_details(bidi_session, new_tab, await_promise): + function_declaration = "()=>{{ throw 1 }}" + if await_promise: + function_declaration = "async" + function_declaration + + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.call_function( + function_declaration=function_declaration, + await_promise=await_promise, + target=ContextTarget(new_tab["context"], "sandbox"), + ) + + recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": {"type": "number", "value": 1}, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + exception.value.result, + ) + + +@pytest.mark.asyncio +async def test_target_realm(bidi_session, top_context, default_realm): + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => { window.foo = 3; }", + target=ContextTarget(top_context["context"], "sandbox"), + await_promise=True, + ) + realm = result["realm"] + + # Make sure that sandbox realm id is different from default + assert realm != default_realm + + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => window.foo", + target=RealmTarget(realm), + await_promise=True, + ) + + recursive_compare( + {"realm": realm, "result": {"type": "number", "value": 3}}, result + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/serialization_options.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/serialization_options.py new file mode 100644 index 0000000000..5f2c64558c --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/serialization_options.py @@ -0,0 +1,444 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget, SerializationOptions + +from ... 
import any_string, recursive_compare + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "include_shadow_tree, shadow_root_mode, expected", + [ + ( + None, + "open", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + None, + "closed", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + "none", + "open", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + "none", + "closed", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + "open", + "open", + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 11, + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 1, + "attributes": {"id": "in-shadow-dom"}, + "shadowRoot": None, + }, + } + ], + "mode": "open", + }, + }, + ), + ( + "open", + "closed", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + "all", + "open", + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 11, + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 1, + "attributes": {"id": "in-shadow-dom"}, + "shadowRoot": None, + }, + } + ], + "mode": "open", + }, + }, + ), + ( + "all", + "closed", + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 11, + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 1, + "attributes": {"id": "in-shadow-dom"}, + "shadowRoot": None, + }, + } + ], + "mode": "closed", + }, + }, + ), + ], + ids=[ + "default mode for open shadow root", + "default mode for closed shadow root", + "'none' mode for open shadow root", + "'none' mode for closed shadow root", + "'open' mode for open shadow root", + "'open' mode for closed shadow root", + "'all' mode for open shadow root", + "'all' mode for closed shadow root", + ], +) +async def test_include_shadow_tree( + bidi_session, + top_context, + get_test_page, + include_shadow_tree, + shadow_root_mode, + expected, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=get_test_page(shadow_root_mode=shadow_root_mode), + wait="complete", + ) + result = await bidi_session.script.call_function( + function_declaration="""() => document.querySelector("custom-element")""", + target=ContextTarget(top_context["context"]), + await_promise=True, + serialization_options=SerializationOptions( + include_shadow_tree=include_shadow_tree, max_dom_depth=1 + ), + ) + + recursive_compare(expected, result["value"]["shadowRoot"]) + + +@pytest.mark.parametrize( + "max_dom_depth, expected", + [ + ( + None, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + "childNodeCount": 2, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ), + ( + 0, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + 
"childNodeCount": 2, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ), + ( + 1, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + "childNodeCount": 2, + "children": [ + { + "sharedId": any_string, + "type": "node", + "value": { + "attributes": {}, + "childNodeCount": 1, + "localName": "p", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + { + "sharedId": any_string, + "type": "node", + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ), + ( + 2, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + "childNodeCount": 2, + "children": [ + { + "sharedId": any_string, + "type": "node", + "value": { + "attributes": {}, + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "span", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 0, + "attributes": {}, + "shadowRoot": None, + }, + } + ], + "localName": "p", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + { + "sharedId": any_string, + "type": "node", + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ), + ], +) +async def test_max_dom_depth( + bidi_session, top_context, get_test_page, max_dom_depth, expected +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=get_test_page(), wait="complete" + ) + result = await bidi_session.script.call_function( + function_declaration="""() => document.querySelector("div#with-children")""", + target=ContextTarget(top_context["context"]), + await_promise=True, + serialization_options=SerializationOptions(max_dom_depth=max_dom_depth), + ) + + recursive_compare(expected, result) + + +async def test_max_dom_depth_null( + bidi_session, + send_blocking_command, + top_context, + get_test_page, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=get_test_page(), wait="complete" + ) + result = await send_blocking_command( + "script.callFunction", + { + "functionDeclaration": """() => document.querySelector("div#with-children")""", + "target": ContextTarget(top_context["context"]), + "awaitPromise": True, + "serializationOptions": {"maxDomDepth": None}, + }, + ) + + recursive_compare( + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 2, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "p", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "span", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 0, + "children": [], + "attributes": {}, + "shadowRoot": 
None, + }, + } + ], + "attributes": {}, + "shadowRoot": None, + }, + }, + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 0, + "children": [], + "attributes": {}, + "shadowRoot": None, + }, + }, + ], + "attributes": {"id": "with-children"}, + "shadowRoot": None, + }, + }, + result["result"], + ) + + +@pytest.mark.parametrize( + "max_object_depth, expected", + [ + ( + None, + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "array", "value": [{"type": "number", "value": 2}]}, + ], + }, + ), + (0, {"type": "array"}), + ( + 1, + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "array"}, + ], + }, + ), + ( + 2, + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "array", "value": [{"type": "number", "value": 2}]}, + ], + }, + ), + ], +) +async def test_max_object_depth(bidi_session, top_context, max_object_depth, expected): + result = await bidi_session.script.call_function( + function_declaration="() => [1, [2]]", + target=ContextTarget(top_context["context"]), + await_promise=True, + serialization_options=SerializationOptions(max_object_depth=max_object_depth), + ) + + assert result == expected diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/strict_mode.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/strict_mode.py new file mode 100644 index 0000000000..1a9fd4f108 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/strict_mode.py @@ -0,0 +1,38 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException +from ... import any_int, any_string, recursive_compare +from .. import any_stack_trace, specific_error_response + + +@pytest.mark.asyncio +async def test_strict_mode(bidi_session, top_context): + + # As long as there is no `SOME_VARIABLE`, the command should fail in strict mode. + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.call_function( + function_declaration="()=>{'use strict';return SOME_VARIABLE=1}", + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + recursive_compare(specific_error_response({"type": "error"}), exception.value.result) + + # In non-strict mode, the command should succeed and global `SOME_VARIABLE` should be created. + result = await bidi_session.script.call_function( + function_declaration="()=>{return SOME_VARIABLE=1}", + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + assert result == { + "type": "number", + "value": 1} + + # Access created by the previous command `SOME_VARIABLE`. + result = await bidi_session.script.call_function( + function_declaration="()=>{'use strict';return SOME_VARIABLE=1}", + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + assert result == { + "type": "number", + "value": 1} diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/this.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/this.py new file mode 100644 index 0000000000..baa4e65b28 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/this.py @@ -0,0 +1,147 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget + +from ... 
import any_string, recursive_compare + + +@pytest.mark.asyncio +async def test_this(bidi_session, top_context): + result = await bidi_session.script.call_function( + function_declaration="function(){return this.some_property}", + this={ + "type": "object", + "value": [[ + "some_property", + { + "type": "number", + "value": 42 + }]]}, + await_promise=False, + target=ContextTarget(top_context["context"])) + + assert result == { + 'type': 'number', + 'value': 42} + + +@pytest.mark.asyncio +async def test_default_this(bidi_session, top_context): + result = await bidi_session.script.call_function( + function_declaration="function(){return this}", + await_promise=False, + target=ContextTarget(top_context["context"])) + + recursive_compare({ + "type": 'window', + }, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "value_fn, function_declaration", + [ + ( + lambda value: value, + "function() { return this === window.SOME_OBJECT; }", + ), + ( + lambda value: ({"type": "object", "value": [["nested", value]]}), + "function() { return this.nested === window.SOME_OBJECT; }", + ), + ( + lambda value: ({"type": "array", "value": [value]}), + "function() { return this[0] === window.SOME_OBJECT; }", + ), + ( + lambda value: ({"type": "map", "value": [["foobar", value]]}), + "function() { return this.get('foobar') === window.SOME_OBJECT; }", + ), + ( + lambda value: ({"type": "set", "value": [value]}), + "function() { return this.has(window.SOME_OBJECT); }", + ), + ], +) +async def test_remote_value_deserialization( + bidi_session, top_context, call_function, evaluate, value_fn, function_declaration +): + remote_value = await evaluate( + "window.SOME_OBJECT = {SOME_PROPERTY:'SOME_VALUE'}; window.SOME_OBJECT", + result_ownership="root", + ) + + # Check that a remote value can be successfully deserialized as the "this" + # parameter and compared against the original object in the page. 
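+    # value_fn either passes the remote reference through unchanged or wraps
+    # it in an object, array, map or set, so deserialization of handles is
+    # exercised both at the top level of "this" and nested inside containers.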
+ result = await call_function( + function_declaration=function_declaration, + this=value_fn(remote_value), + ) + assert result == {"type": "boolean", "value": True} + + # Reload the page to cleanup the state + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=top_context["url"], wait="complete" + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "channel, expected_data", + [ + ( + {"type": "channel", "value": {"channel": "channel_name"}}, + {"type": "object", "value": [["foo", {"type": "string", "value": "bar"}]]}, + ), + ( + { + "type": "channel", + "value": { + "channel": "channel_name", + "serializationOptions": { + "maxObjectDepth": 0 + }, + }, + }, + {"type": "object"}, + ), + ( + { + "type": "channel", + "value": {"channel": "channel_name", "ownership": "root"}, + }, + { + "handle": any_string, + "type": "object", + "value": [["foo", {"type": "string", "value": "bar"}]], + }, + ), + ], + ids=["default", "with serializationOptions", "with ownership"], +) +async def test_channel( + bidi_session, top_context, subscribe_events, wait_for_event, channel, expected_data +): + await subscribe_events(["script.message"]) + + on_entry_added = wait_for_event("script.message") + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="function() { return this({'foo': 'bar'}) }", + await_promise=False, + target=ContextTarget(top_context["context"]), + this=channel + ) + event_data = await on_entry_added + + recursive_compare( + { + "channel": "channel_name", + "data": expected_data, + "source": { + "realm": result["realm"], + "context": top_context["context"], + }, + }, + event_data, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/classic_interop/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/classic_interop/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/classic_interop/node_shared_id.py b/testing/web-platform/tests/webdriver/tests/bidi/script/classic_interop/node_shared_id.py new file mode 100644 index 0000000000..82b39b42e1 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/classic_interop/node_shared_id.py @@ -0,0 +1,101 @@ +import pytest + +from webdriver import Element, ShadowRoot +from webdriver.bidi.modules.script import ContextTarget + +pytestmark = pytest.mark.asyncio + +DOCUMENT_FRAGMENT_NODE = 11 +ELEMENT_NODE = 1 + + +async def test_web_element_reference_created_in_classic( + bidi_session, + current_session, + get_test_page, + top_context, +): + current_session.url = get_test_page() + + node = current_session.execute_script( + """return document.querySelector("div#with-children")""" + ) + shared_id = node.id + + # Use element reference from WebDriver classic in WebDriver BiDi + result = await bidi_session.script.call_function( + function_declaration="(node)=>{return node.nodeType}", + arguments=[{"sharedId": shared_id}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + assert result == {"type": "number", "value": ELEMENT_NODE} + + +async def test_web_element_reference_created_in_bidi( + bidi_session, + current_session, + get_test_page, + top_context, +): + current_session.url = get_test_page() + + result = await bidi_session.script.evaluate( + expression="""document.querySelector("div#with-children")""", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + nodeType = result["value"]["nodeType"] + assert 
nodeType == ELEMENT_NODE + + # Use element reference from WebDriver BiDi in WebDriver classic + node = Element(current_session, result["sharedId"]) + nodeType = current_session.execute_script( + """return arguments[0].nodeType""", args=(node,) + ) + assert nodeType == ELEMENT_NODE + + +@pytest.mark.parametrize("shadow_root_mode", ["open", "closed"]) +async def test_shadow_root_reference_created_in_classic( + bidi_session, current_session, get_test_page, top_context, shadow_root_mode +): + current_session.url = get_test_page(shadow_root_mode=shadow_root_mode) + + node = current_session.execute_script( + """return document.querySelector("custom-element")""" + ) + shared_id = node.shadow_root.id + + # Use shadow root reference from WebDriver classic in WebDriver BiDi + result = await bidi_session.script.call_function( + function_declaration="(node)=>{return node.nodeType}", + arguments=[{"sharedId": shared_id}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + assert result == {"type": "number", "value": DOCUMENT_FRAGMENT_NODE} + + +@pytest.mark.parametrize("shadow_root_mode", ["open", "closed"]) +async def test_shadow_root_reference_created_in_bidi( + bidi_session, current_session, get_test_page, top_context, shadow_root_mode +): + current_session.url = get_test_page(shadow_root_mode=shadow_root_mode) + + result = await bidi_session.script.evaluate( + expression="""document.querySelector("custom-element")""", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + shared_id_for_shadow_root = result["value"]["shadowRoot"]["sharedId"] + + # Use shadow root reference from WebDriver BiDi in WebDriver classic + node = ShadowRoot(current_session, shared_id_for_shadow_root) + nodeType = current_session.execute_script( + """return arguments[0].nodeType""", args=(node,) + ) + assert nodeType == DOCUMENT_FRAGMENT_NODE diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/conftest.py b/testing/web-platform/tests/webdriver/tests/bidi/script/conftest.py new file mode 100644 index 0000000000..61b1e09382 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/conftest.py @@ -0,0 +1,67 @@ +import pytest +import pytest_asyncio +from typing import Any, List, Mapping, Optional + +from webdriver.bidi.modules.script import ContextTarget, OwnershipModel, SerializationOptions + + +@pytest.fixture +def call_function(bidi_session, top_context): + async def call_function( + function_declaration: str, + arguments: List[Mapping[str, Any]] = [], + this: Any = None, + context: str = top_context["context"], + sandbox: str = None, + result_ownership: OwnershipModel = OwnershipModel.NONE.value, + serialization_options: Optional[SerializationOptions] = None, + ) -> Mapping[str, Any]: + if sandbox is None: + target = ContextTarget(top_context["context"]) + else: + target = ContextTarget(top_context["context"], sandbox) + + result = await bidi_session.script.call_function( + function_declaration=function_declaration, + arguments=arguments, + this=this, + await_promise=False, + result_ownership=result_ownership, + serialization_options=serialization_options, + target=target, + ) + return result + + return call_function + + +@pytest_asyncio.fixture +async def default_realm(bidi_session, top_context): + realms = await bidi_session.script.get_realms(context=top_context["context"]) + return realms[0]["realm"] + + +@pytest.fixture +def evaluate(bidi_session, top_context): + async def evaluate( + expression: str, + context: str = 
top_context["context"], + sandbox: str = None, + result_ownership: OwnershipModel = OwnershipModel.NONE.value, + serialization_options: Optional[SerializationOptions] = None, + ) -> Mapping[str, Any]: + if sandbox is None: + target = ContextTarget(top_context["context"]) + else: + target = ContextTarget(top_context["context"], sandbox) + + result = await bidi_session.script.evaluate( + expression=expression, + await_promise=False, + result_ownership=result_ownership, + serialization_options=serialization_options, + target=target, + ) + return result + + return evaluate diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/handles.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/handles.py new file mode 100644 index 0000000000..e1226463a5 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/handles.py @@ -0,0 +1,173 @@ +import pytest + +import webdriver.bidi.error as error + +from webdriver.bidi.modules.script import ContextTarget + +from .. import assert_handle + +pytestmark = pytest.mark.asyncio + + +async def test_basic_handle(bidi_session, top_context, call_function): + remote_value = await bidi_session.script.evaluate( + expression="({a:1})", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + + assert_handle(remote_value, True) + + result = await call_function("arg => arg.a", [remote_value]) + + assert result == {"type": "number", "value": 1} + + await bidi_session.script.disown( + handles=[remote_value["handle"]], target=ContextTarget(top_context["context"]) + ) + + with pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.a", [remote_value]) + + +async def test_multiple_handles_for_different_objects( + bidi_session, top_context, call_function +): + # Create a handle + remote_value_a = await bidi_session.script.evaluate( + expression="({a:1})", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + + remote_value_b = await bidi_session.script.evaluate( + expression="({b:2})", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + + remote_value_c = await bidi_session.script.evaluate( + expression="({c:3})", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + + assert_handle(remote_value_a, True) + assert_handle(remote_value_b, True) + assert_handle(remote_value_c, True) + + # disown a and b + await bidi_session.script.disown( + handles=[remote_value_a["handle"], remote_value_b["handle"]], + target=ContextTarget(top_context["context"]), + ) + + # using handle a or b should raise an exception + with pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.a", [remote_value_a]) + + with pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.b", [remote_value_b]) + + # remote value c should still work + result = await call_function("arg => arg.c", [remote_value_c]) + + assert result == {"type": "number", "value": 3} + + # disown c + await bidi_session.script.disown( + handles=[remote_value_c["handle"]], target=ContextTarget(top_context["context"]) + ) + + # using handle c should raise an exception + with 
pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.c", [remote_value_c]) + + +async def test_multiple_handles_for_same_object( + bidi_session, top_context, call_function +): + remote_value1 = await bidi_session.script.evaluate( + expression="window.test = { a: 1 }; window.test", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + assert_handle(remote_value1, True) + + remote_value2 = await bidi_session.script.evaluate( + expression="window.test", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + assert_handle(remote_value2, True) + + # Check that both handles can be used + result = await call_function("arg => arg.a", [remote_value1]) + assert result == {"type": "number", "value": 1} + + result = await call_function("arg => arg.a", [remote_value2]) + assert result == {"type": "number", "value": 1} + + # Check that both handles point to the same value + result = await call_function( + "(arg1, arg2) => arg1 === arg2", [remote_value1, remote_value2] + ) + assert result == {"type": "boolean", "value": True} + + # Disown the handle 1 + await bidi_session.script.disown( + handles=[remote_value1["handle"]], target=ContextTarget(top_context["context"]) + ) + + # Using handle 1 should raise an exception + with pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.a", [remote_value1]) + + # Using handle 2 should still work + result = await call_function("arg => arg.a", [remote_value2]) + assert result == {"type": "number", "value": 1} + + # Disown the handle 2 + await bidi_session.script.disown( + handles=[remote_value2["handle"]], target=ContextTarget(top_context["context"]) + ) + + # Using handle 2 should raise an exception + with pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.a", [remote_value2]) + + +async def test_unknown_handle(bidi_session, top_context, call_function): + # Create a handle + remote_value = await bidi_session.script.evaluate( + expression="({a:1})", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + + assert_handle(remote_value, True) + + # An unknown handle should not remove other handles, and should not fail + await bidi_session.script.disown( + handles=["unknown_handle"], target=ContextTarget(top_context["context"]) + ) + + result = await call_function("arg => arg.a", [remote_value]) + + assert result == {"type": "number", "value": 1} + + # Passing an unknown handle with an existing handle should disown the existing one + await bidi_session.script.disown( + handles=["unknown_handle", remote_value["handle"]], + target=ContextTarget(top_context["context"]), + ) + + with pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.a", [remote_value]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid.py new file mode 100644 index 0000000000..f9849f3e39 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid.py @@ -0,0 +1,68 @@ +import pytest +import webdriver.bidi.error as error + +from webdriver.bidi.modules.script import ContextTarget, RealmTarget + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("target", [None, False, "foo", 42, {}, []]) +async def test_params_target_invalid_type(bidi_session, target): + with pytest.raises(error.InvalidArgumentException): + await 
bidi_session.script.disown( + handles=[], + target=target) + + +@pytest.mark.parametrize("context", [None, False, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.disown( + handles=[], + target=ContextTarget(context)) + + +@pytest.mark.parametrize("sandbox", [False, 42, {}, []]) +async def test_params_sandbox_invalid_type(bidi_session, top_context, sandbox): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.disown( + handles=[], + target=ContextTarget(top_context["context"], sandbox)) + + +async def test_params_context_unknown(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.script.disown( + handles=[], + target=ContextTarget("_UNKNOWN_")) + + +@pytest.mark.parametrize("realm", [None, False, 42, {}, []]) +async def test_params_realm_invalid_type(bidi_session, realm): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.disown( + handles=[], + target=RealmTarget(realm)) + + +async def test_params_realm_unknown(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.script.disown( + handles=[], + target=RealmTarget("_UNKNOWN_")) + + +@pytest.mark.parametrize("handles", [None, False, "foo", 42, {}]) +async def test_params_handles_invalid_type(bidi_session, top_context, handles): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.disown( + handles=handles, + target=ContextTarget(top_context["context"])) + + +@pytest.mark.parametrize("handle", [None, False, 42, {}, []]) +async def test_params_handles_invalid_handle_type(bidi_session, top_context, handle): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.disown( + handles=[handle], + target=ContextTarget(top_context["context"])) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid_tentative.py new file mode 100644 index 0000000000..730e2f575f --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid_tentative.py @@ -0,0 +1,35 @@ +import pytest +import webdriver.bidi.error as error + +from webdriver.bidi.modules.script import ContextTarget + +pytestmark = pytest.mark.asyncio + + +# The following tests are marked as tentative until +# https://github.com/w3c/webdriver-bidi/issues/274 is resolved. 
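+# A "target" that combines "realm" with "context" or "sandbox", or that names
+# a sandbox without a context, is ambiguous; the tests below expect such
+# targets to be rejected with an "invalid argument" error.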
+async def test_params_target_invalid_value(bidi_session, top_context): + result = await bidi_session.script.call_function( + raw_result=True, + function_declaration="() => 1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=True, + ) + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.disown( + handles=[], + target={"context": top_context["context"], "realm": result["realm"]}, + ) + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.disown( + handles=[], + target={"sandbox": "foo", "realm": result["realm"]}, + ) + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.disown( + handles=[], + target={"sandbox": "bar"}, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/target.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/target.py new file mode 100644 index 0000000000..67d857a041 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/target.py @@ -0,0 +1,95 @@ +import pytest + +import webdriver.bidi.error as error + +from webdriver.bidi.modules.script import ContextTarget, RealmTarget + +from .. import assert_handle + +pytestmark = pytest.mark.asyncio + + +async def test_realm(bidi_session, top_context, call_function): + remote_value = await bidi_session.script.evaluate( + raw_result=True, + expression="({a:1})", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + + assert_handle(remote_value["result"], True) + + result = await call_function("arg => arg.a", [remote_value["result"]]) + + assert result == {"type": "number", "value": 1} + + await bidi_session.script.disown( + handles=[remote_value["result"]["handle"]], + target=RealmTarget(remote_value["realm"]), + ) + + with pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.a", [remote_value["result"]]) + + +async def test_sandbox(bidi_session, top_context, call_function): + # Create a remote value outside of any sandbox + remote_value = await bidi_session.script.evaluate( + expression="({a:'without sandbox'})", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"]), + ) + + # Create a remote value from a sandbox + sandbox_value = await bidi_session.script.evaluate( + expression="({a:'with sandbox'})", + await_promise=False, + result_ownership="root", + target=ContextTarget(top_context["context"], "basic_sandbox"), + ) + + # Try to disown the non-sandboxed remote value from the sandbox + await bidi_session.script.disown( + handles=[remote_value["handle"]], + target=ContextTarget(top_context["context"], "basic_sandbox"), + ) + + # Check that the remote value is still working + result = await call_function("arg => arg.a", [remote_value]) + assert result == {"type": "string", "value": "without sandbox"} + + # Try to disown the sandbox value: + # - from the non-sandboxed top context + # - from another sandbox + await bidi_session.script.disown( + handles=[sandbox_value["handle"]], target=ContextTarget(top_context["context"]) + ) + await bidi_session.script.disown( + handles=[sandbox_value["handle"]], + target=ContextTarget(top_context["context"], "another_sandbox"), + ) + + # Check that the sandbox remote value is still working + result = await call_function( + "arg => arg.a", [sandbox_value], sandbox="basic_sandbox" + ) + assert result == {"type": "string", "value": "with sandbox"} + + # Disown the sandbox remote value from the correct 
sandbox + await bidi_session.script.disown( + handles=[sandbox_value["handle"]], + target=ContextTarget(top_context["context"], "basic_sandbox"), + ) + + with pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.a", [sandbox_value], sandbox="basic_sandbox") + + # Disown the non-sandboxed remote value from the top context + await bidi_session.script.disown( + handles=[remote_value["handle"]], target=ContextTarget(top_context["context"]) + ) + + with pytest.raises(error.NoSuchHandleException): + await call_function("arg => arg.a", [remote_value], sandbox="basic_sandbox") diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/__init__.py new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/__init__.py @@ -0,0 +1 @@ + diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/await_promise.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/await_promise.py new file mode 100644 index 0000000000..e99b657506 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/await_promise.py @@ -0,0 +1,220 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException + +from ... import any_int, any_string, recursive_compare +from .. import any_stack_trace + + +@pytest.mark.asyncio +async def test_await_promise_delayed(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression=""" + new Promise(r => {{ + setTimeout(() => r("SOME_DELAYED_RESULT"), 0); + }}) + """, + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + assert result == {"type": "string", "value": "SOME_DELAYED_RESULT"} + + +@pytest.mark.asyncio +async def test_await_promise_rejected(bidi_session, top_context): + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.evaluate( + expression="Promise.reject('SOME_REJECTED_RESULT')", + target=ContextTarget(top_context["context"]), + await_promise=True, + ) + + recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": {"type": "string", "value": "SOME_REJECTED_RESULT"}, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + exception.value.result, + ) + + +@pytest.mark.asyncio +async def test_await_promise_resolved(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression="Promise.resolve('SOME_RESOLVED_RESULT')", + target=ContextTarget(top_context["context"]), + await_promise=True, + ) + + assert result == {"type": "string", "value": "SOME_RESOLVED_RESULT"} + + +@pytest.mark.asyncio +async def test_await_resolve_array(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression="Promise.resolve([1, 'text', true, ['will be serialized']])", + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + assert result == { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "text"}, + {"type": "boolean", "value": True}, + {"type": "array", "value": [{"type": "string", "value": "will be serialized"}]}, + ], + } + + +@pytest.mark.asyncio +async def test_await_resolve_date(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression="Promise.resolve(new Date(0))", + 
await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + assert result == { + "type": "date", + "value": "1970-01-01T00:00:00.000Z", + } + + +@pytest.mark.asyncio +async def test_await_resolve_map(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression=""" + Promise.resolve( + new Map([ + ['key1', 'value1'], + [2, new Date(0)], + ['key3', new Map([['key4', 'serialized']])] + ]) + )""", + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + assert result == { + "type": "map", + "value": [ + ["key1", {"type": "string", "value": "value1"}], + [ + {"type": "number", "value": 2}, + {"type": "date", "value": "1970-01-01T00:00:00.000Z"}, + ], + ["key3", {"type": "map", "value": [[ + "key4", + {"type": "string", "value": "serialized"} + ]]}], + ], + } + + +@pytest.mark.parametrize( + "expression, expected, type", + [ + ("undefined", None, "undefined"), + ("null", None, "null"), + ('"text"', "text", "string"), + ("42", 42, "number"), + ("Number.NaN", "NaN", "number"), + ("-0", "-0", "number"), + ("Infinity", "Infinity", "number"), + ("-Infinity", "-Infinity", "number"), + ("true", True, "boolean"), + ("false", False, "boolean"), + ("42n", "42", "bigint"), + ], +) +@pytest.mark.asyncio +async def test_await_resolve_primitive( + bidi_session, top_context, expression, expected, type +): + result = await bidi_session.script.evaluate( + expression=f"Promise.resolve({expression})", + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + if expected is None: + assert result == {"type": type} + else: + assert result == {"type": type, "value": expected} + + +@pytest.mark.asyncio +async def test_await_resolve_regexp(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression="Promise.resolve(/test/i)", + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + assert result == { + "type": "regexp", + "value": { + "pattern": "test", + "flags": "i", + }, + } + + +@pytest.mark.asyncio +async def test_await_resolve_set(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression=""" + Promise.resolve( + new Set([ + 'value1', + 2, + true, + new Date(0), + new Set([-1, 'serialized']) + ]) + )""", + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + assert result == { + "type": "set", + "value": [ + {"type": "string", "value": "value1"}, + {"type": "number", "value": 2}, + {"type": "boolean", "value": True}, + {"type": "date", "value": "1970-01-01T00:00:00.000Z"}, + {"type": "set", "value": [{"type": "number", "value": -1}, {"type": "string", "value": "serialized"}]}, + ], + } + + +@pytest.mark.asyncio +async def test_no_await_promise_rejected(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression="Promise.reject('SOME_REJECTED_RESULT')", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare({"type": "promise"}, result) + + +@pytest.mark.asyncio +async def test_no_await_promise_resolved(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression="Promise.resolve('SOME_RESOLVED_RESULT')", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare({"type": "promise"}, result) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/evaluate.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/evaluate.py new file mode 100644 index 
0000000000..5a8cf61a17 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/evaluate.py @@ -0,0 +1,95 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget, RealmTarget + +from ... import recursive_compare + + +@pytest.mark.asyncio +async def test_eval(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression="1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=True) + + assert result == { + "type": "number", + "value": 3} + + +@pytest.mark.asyncio +async def test_interact_with_dom(bidi_session, top_context): + result = await bidi_session.script.evaluate( + expression="'window.location.href: ' + window.location.href", + target=ContextTarget(top_context["context"]), + await_promise=True) + + assert result == { + "type": "string", + "value": "window.location.href: about:blank"} + + +@pytest.mark.asyncio +async def test_target_realm(bidi_session, default_realm): + result = await bidi_session.script.evaluate( + raw_result=True, + expression="window.foo = 3", + target=RealmTarget(default_realm), + await_promise=True, + ) + + recursive_compare({"realm": default_realm, "result": {"type": "number", "value": 3}}, result) + + result = await bidi_session.script.evaluate( + raw_result=True, + expression="window.foo", + target=RealmTarget(default_realm), + await_promise=True, + ) + + recursive_compare( + {"realm": default_realm, "result": {"type": "number", "value": 3}}, result + ) + + +@pytest.mark.asyncio +async def test_different_target_realm(bidi_session): + await bidi_session.browsing_context.create(type_hint="tab") + + realms = await bidi_session.script.get_realms() + first_tab_default_realm = realms[0]["realm"] + second_tab_default_realm = realms[1]["realm"] + + assert first_tab_default_realm != second_tab_default_realm + + await bidi_session.script.evaluate( + raw_result=True, + expression="window.foo = 3", + target=RealmTarget(first_tab_default_realm), + await_promise=True, + ) + await bidi_session.script.evaluate( + raw_result=True, + expression="window.foo = 5", + target=RealmTarget(second_tab_default_realm), + await_promise=True, + ) + + top_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="window.foo", + target=RealmTarget(first_tab_default_realm), + await_promise=True, + ) + recursive_compare( + {"realm": first_tab_default_realm, "result": {"type": "number", "value": 3}}, top_context_result + ) + + new_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="window.foo", + target=RealmTarget(second_tab_default_realm), + await_promise=True, + ) + recursive_compare( + {"realm": second_tab_default_realm, "result": {"type": "number", "value": 5}}, new_context_result + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/exception_details.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/exception_details.py new file mode 100644 index 0000000000..fa5d5c4ae2 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/exception_details.py @@ -0,0 +1,212 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException + +from ... import any_int, any_string, recursive_compare +from .. 
import any_stack_trace + + +@pytest.mark.asyncio +async def test_invalid_script(bidi_session, top_context): + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.evaluate( + expression="))) !!@@## some invalid JS script (((", + target=ContextTarget(top_context["context"]), + await_promise=True, + ) + recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": {"type": "error"}, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + exception.value.result, + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("await_promise", [True, False]) +@pytest.mark.parametrize( + "expression, expected", + [ + ("undefined", {"type": "undefined"}), + ("null", {"type": "null"}), + ("'foobar'", {"type": "string", "value": "foobar"}), + ("'2'", {"type": "string", "value": "2"}), + ("Number.NaN", {"type": "number", "value": "NaN"}), + ("-0", {"type": "number", "value": "-0"}), + ("Infinity", {"type": "number", "value": "Infinity"}), + ("-Infinity", {"type": "number", "value": "-Infinity"}), + ("3", {"type": "number", "value": 3}), + ("1.4", {"type": "number", "value": 1.4}), + ("true", {"type": "boolean", "value": True}), + ("false", {"type": "boolean", "value": False}), + ("42n", {"type": "bigint", "value": "42"}), + ("(Symbol('foo'))", {"type": "symbol", },), + ( + "[1, 'foo', true, new RegExp(/foo/g), [1]]", + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + { + "type": "regexp", + "value": { + "pattern": "foo", + "flags": "g", + }, + }, + {"type": "array"}, + ], + }, + ), + ( + "({'foo': {'bar': 'baz'}, 'qux': 'quux'})", + { + "type": "object", + "value": [ + ["foo", {"type": "object"}], + ["qux", {"type": "string", "value": "quux"}], + ], + }, + ), + ("(()=>{})", {"type": "function", },), + ("(function(){})", {"type": "function", },), + ("(async ()=>{})", {"type": "function", },), + ("(async function(){})", {"type": "function", },), + ( + "new RegExp(/foo/g)", + { + "type": "regexp", + "value": { + "pattern": "foo", + "flags": "g", + }, + }, + ), + ( + "new Date(1654004849000)", + { + "type": "date", + "value": "2022-05-31T13:47:29.000Z", + }, + ), + ( + "new Map([[1, 2], ['foo', 'bar'], [true, false], ['baz', [1]]])", + { + "type": "map", + "value": [ + [ + {"type": "number", "value": 1}, + {"type": "number", "value": 2}, + ], + ["foo", {"type": "string", "value": "bar"}], + [ + {"type": "boolean", "value": True}, + {"type": "boolean", "value": False}, + ], + ["baz", {"type": "array"}], + ], + }, + ), + ( + "new Set([1, 'foo', true, [1], new Map([[1,2]])])", + { + "type": "set", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + {"type": "array"}, + {"type": "map"}, + ], + }, + ), + ("new WeakMap()", {"type": "weakmap", },), + ("new WeakSet()", {"type": "weakset", },), + ("new Error('SOME_ERROR_TEXT')", {"type": "error"},), + # TODO(sadym): add `iterator` test. + # TODO(sadym): add `generator` test. + # TODO(sadym): add `proxy` test. 
+ ("Promise.resolve()", {"type": "promise", },), + ("new Int32Array()", {"type": "typedarray", },), + ("new ArrayBuffer()", {"type": "arraybuffer", },), + ( + "document.createElement('div')", + { + "type": "node", + 'value': { + 'attributes': {}, + 'childNodeCount': 0, + 'localName': 'div', + 'namespaceURI': 'http://www.w3.org/1999/xhtml', + 'nodeType': 1, + } + }, + ), + ("window", {"type": "window", },), + ], +) +@pytest.mark.asyncio +async def test_exception_details(bidi_session, top_context, await_promise, expression, expected): + if await_promise: + expression = f"Promise.reject({expression})" + else: + expression = f"throw {expression}" + + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=await_promise, + ) + + recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": expected, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + exception.value.result, + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("chained", [True, False]) +async def test_rejected_promise(bidi_session, top_context, chained): + if chained: + expression = "Promise.reject('error').then(() => { })" + else: + expression = "Promise.reject('error')" + + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.evaluate( + expression=expression, + await_promise=True, + target=ContextTarget(top_context["context"]), + ) + + recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": {"type": "string", "value": "error"}, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + exception.value.result, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/internal_id.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/internal_id.py new file mode 100644 index 0000000000..98742ef102 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/internal_id.py @@ -0,0 +1,65 @@ +import pytest + +from ... 
import recursive_compare, any_string + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "return_structure, result_type", + [ + ("[data, data]", "array"), + ("new Map([['foo', data],['bar', data]])", "map"), + ("({ 'foo': data, 'bar': data })", "object"), + ], +) +@pytest.mark.parametrize( + "expression, type", + [ + ("[1]", "array"), + ("new Map([[true, false]])", "map"), + ("new Set(['baz'])", "set"), + ("{ baz: 'qux' }", "object"), + ], +) +async def test_remote_values_with_internal_id( + evaluate, return_structure, result_type, expression, type +): + result = await evaluate(f"{{const data = {expression}; {return_structure}}}") + result_value = result["value"] + + assert len(result_value) == 2 + + if result_type == "array": + value = [ + {"type": type, "internalId": any_string}, + {"type": type, "internalId": any_string}, + ] + internalId1 = result_value[0]["internalId"] + internalId2 = result_value[1]["internalId"] + else: + value = [ + ["foo", {"type": type, "internalId": any_string}], + ["bar", {"type": type, "internalId": any_string}], + ] + internalId1 = result_value[0][1]["internalId"] + internalId2 = result_value[1][1]["internalId"] + + # Make sure that the same duplicated objects have the same internal ids + assert internalId1 == internalId2 + + recursive_compare(value, result_value) + + +@pytest.mark.asyncio +async def test_different_remote_values_have_unique_internal_ids(evaluate): + result = await evaluate( + "{const obj1 = [1]; const obj2 = {'foo': 'bar'}; [obj1, obj2, obj1, obj2]}" + ) + + assert len(result["value"]) == 4 + + internalId1 = result["value"][0]["internalId"] + internalId2 = result["value"][1]["internalId"] + + # Make sure that different duplicated objects have different internal ids + assert internalId1 != internalId2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid.py new file mode 100644 index 0000000000..9923d7414e --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid.py @@ -0,0 +1,153 @@ +import pytest +import webdriver.bidi.error as error + +from webdriver.bidi.modules.script import ContextTarget, RealmTarget, SerializationOptions + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("target", [None, False, "foo", 42, {}, []]) +async def test_params_target_invalid_type(bidi_session, target): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + target=target, + await_promise=True) + + +@pytest.mark.parametrize("context", [None, False, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + target=ContextTarget(context), + await_promise=True) + + +@pytest.mark.parametrize("sandbox", [False, 42, {}, []]) +async def test_params_sandbox_invalid_type(bidi_session, top_context, sandbox): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + target=ContextTarget(top_context["context"], sandbox), + await_promise=True) + + +async def test_params_context_unknown(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.script.evaluate( + expression="1 + 2", + target=ContextTarget("_UNKNOWN_"), + await_promise=True) + + +@pytest.mark.parametrize("realm", [None, False, 42, {}, []]) +async def 
test_params_realm_invalid_type(bidi_session, realm): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + target=RealmTarget(realm), + await_promise=True) + + +async def test_params_realm_unknown(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.script.evaluate( + expression="1 + 2", + target=RealmTarget("_UNKNOWN_"), + await_promise=True) + + +@pytest.mark.parametrize("expression", [None, False, 42, {}, []]) +async def test_params_expression_invalid_type(bidi_session, top_context, expression): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=True) + + +@pytest.mark.parametrize("await_promise", [None, "False", 0, 42, {}, []]) +async def test_params_await_promise_invalid_type(bidi_session, top_context, await_promise): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + await_promise=await_promise, + target=ContextTarget(top_context["context"])) + + +@pytest.mark.parametrize("result_ownership", [False, "_UNKNOWN_", 42, {}, []]) +async def test_params_result_ownership_invalid_value(bidi_session, top_context, result_ownership): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + result_ownership=result_ownership, + target=ContextTarget(top_context["context"]), + await_promise=True) + + +@pytest.mark.parametrize("serialization_options", [False, "_UNKNOWN_", 42, []]) +async def test_params_serialization_options_invalid_type(bidi_session, top_context, serialization_options): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + serialization_options=serialization_options, + target=ContextTarget(top_context["context"]), + await_promise=True) + + +@pytest.mark.parametrize("max_dom_depth", [False, "_UNKNOWN_", {}, []]) +async def test_params_max_dom_depth_invalid_type(bidi_session, top_context, max_dom_depth): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + serialization_options=SerializationOptions(max_dom_depth=max_dom_depth), + target=ContextTarget(top_context["context"]), + await_promise=True) + + +async def test_params_max_dom_depth_invalid_value(bidi_session, top_context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + serialization_options=SerializationOptions(max_dom_depth=-1), + target=ContextTarget(top_context["context"]), + await_promise=True) + + +@pytest.mark.parametrize("max_object_depth", [False, "_UNKNOWN_", {}, []]) +async def test_params_max_object_depth_invalid_type(bidi_session, top_context, max_object_depth): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + serialization_options=SerializationOptions(max_object_depth=max_object_depth), + target=ContextTarget(top_context["context"]), + await_promise=True) + + +async def test_params_max_object_depth_invalid_value(bidi_session, top_context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + serialization_options=SerializationOptions(max_object_depth=-1), + target=ContextTarget(top_context["context"]), + await_promise=True) + + 
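+# The two tests below exercise the includeShadowTree serialization option
+# (invalid argument types, then an invalid string value).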
+@pytest.mark.parametrize("include_shadow_tree", [False, 42, {}, []]) +async def test_params_max_object_depth_invalid_type(bidi_session, top_context, include_shadow_tree): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + serialization_options=SerializationOptions(include_shadow_tree=include_shadow_tree), + target=ContextTarget(top_context["context"]), + await_promise=True) + + +async def test_params_max_object_depth_invalid_value(bidi_session, top_context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + serialization_options=SerializationOptions(include_shadow_tree="foo"), + target=ContextTarget(top_context["context"]), + await_promise=True) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid_tentative.py new file mode 100644 index 0000000000..e98a697c80 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid_tentative.py @@ -0,0 +1,38 @@ +import pytest +import webdriver.bidi.error as error + +from webdriver.bidi.modules.script import ContextTarget + +pytestmark = pytest.mark.asyncio + + +# The following tests are marked as tentative until +# https://github.com/w3c/webdriver-bidi/issues/274 is resolved. +async def test_params_target_invalid_value(bidi_session, top_context): + result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=True, + ) + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + target={"context": top_context["context"], "realm": result["realm"]}, + await_promise=True, + ) + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.evaluate( + expression="1 + 2", + target={"sandbox": "foo", "realm": result["realm"]}, + await_promise=True, + ) + + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.call_function( + function_declaration="1 + 2", + target={"sandbox": "bar"}, + await_promise=True, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result.py new file mode 100644 index 0000000000..a8278dbfbb --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result.py @@ -0,0 +1,141 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget, SerializationOptions + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ("undefined", {"type": "undefined"}), + ("null", {"type": "null"}), + ("'foobar'", {"type": "string", "value": "foobar"}), + ("'2'", {"type": "string", "value": "2"}), + ("Number.NaN", {"type": "number", "value": "NaN"}), + ("-0", {"type": "number", "value": "-0"}), + ("Infinity", {"type": "number", "value": "Infinity"}), + ("-Infinity", {"type": "number", "value": "-Infinity"}), + ("3", {"type": "number", "value": 3}), + ("1.4", {"type": "number", "value": 1.4}), + ("true", {"type": "boolean", "value": True}), + ("false", {"type": "boolean", "value": False}), + ("42n", {"type": "bigint", "value": "42"}), + ], +) +async def test_primitive_values(bidi_session, top_context, expression, expected): + result = await bidi_session.script.evaluate( + expression=expression, + 
target=ContextTarget(top_context["context"]), + await_promise=True, + ) + + assert result == expected + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ("(Symbol('foo'))", {"type": "symbol", },), + ( + "[1, 'foo', true, new RegExp(/foo/g), [1]]", + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + { + "type": "regexp", + "value": { + "pattern": "foo", + "flags": "g", + }, + }, + {"type": "array"}, + ], + }, + ), + ( + "({'foo': {'bar': 'baz'}, 'qux': 'quux'})", + { + "type": "object", + "value": [ + ["foo", {"type": "object"}], + ["qux", {"type": "string", "value": "quux"}], + ], + }, + ), + ("(()=>{})", {"type": "function", },), + ("(function(){})", {"type": "function", },), + ("(async ()=>{})", {"type": "function", },), + ("(async function(){})", {"type": "function", },), + ( + "new RegExp(/foo/g)", + { + "type": "regexp", + "value": { + "pattern": "foo", + "flags": "g", + }, + }, + ), + ( + "new Date(1654004849000)", + { + "type": "date", + "value": "2022-05-31T13:47:29.000Z", + }, + ), + ( + "new Map([[1, 2], ['foo', 'bar'], [true, false], ['baz', [1]]])", + { + "type": "map", + "value": [ + [ + {"type": "number", "value": 1}, + {"type": "number", "value": 2}, + ], + ["foo", {"type": "string", "value": "bar"}], + [ + {"type": "boolean", "value": True}, + {"type": "boolean", "value": False}, + ], + ["baz", {"type": "array"}], + ], + }, + ), + ( + "new Set([1, 'foo', true, [1], new Map([[1,2]])])", + { + "type": "set", + "value": [ + {"type": "number", "value": 1}, + {"type": "string", "value": "foo"}, + {"type": "boolean", "value": True}, + {"type": "array"}, + {"type": "map"}, + ], + }, + ), + ("new WeakMap()", {"type": "weakmap", },), + ("new WeakSet()", {"type": "weakset", },), + ("new Error('SOME_ERROR_TEXT')", {"type": "error"},), + # TODO(sadym): add `iterator` test. + # TODO(sadym): add `generator` test. + # TODO(sadym): add `proxy` test. + ("Promise.resolve()", {"type": "promise", },), + ("new Int32Array()", {"type": "typedarray", },), + ("new ArrayBuffer()", {"type": "arraybuffer", },), + ("window", {"type": "window", },), + ("new URL('https://example.com')", {"type": "object", },), + ] +) +async def test_remote_values(bidi_session, top_context, expression, expected): + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + serialization_options=SerializationOptions(max_object_depth=1), + ) + + assert result == expected diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_node.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_node.py new file mode 100644 index 0000000000..ae07b940d0 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_node.py @@ -0,0 +1,644 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget, SerializationOptions + +from ... 
import any_string, recursive_compare + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( # basic + """ + document.querySelector("br") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "children": [], + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + }, + }, + ), + ( # attributes + """ + document.querySelector("svg") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": { + "svg:foo": "bar", + }, + "childNodeCount": 0, + "children": [], + "localName": "svg", + "namespaceURI": "http://www.w3.org/2000/svg", + "nodeType": 1, + }, + }, + ), + ( # all children including non-element nodes + """ + document.querySelector("#with-text-node") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-text-node"}, + "childNodeCount": 1, + "children": [{ + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 3, + "nodeValue": "Lorem", + } + }], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + }, + }, + ), + ( # children limited due to max depth + """ + document.querySelector("#with-children") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + "childNodeCount": 2, + "children": [{ + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 1, + "localName": "p", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }, { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + }, + }, + ), + ( # not connected + """ + document.createElement("div") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "children": [], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + }, + }, + ), + ], ids=[ + "basic", + "attributes", + "all_children", + "children_max_depth", + "not_connected", + ] +) +async def test_element_node(bidi_session, get_test_page, top_context, expression, expected): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + serialization_options=SerializationOptions(max_dom_depth=1), + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( + """ + document.querySelector("input#button").attributes[0] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "localName": "id", + "namespaceURI": None, + "nodeType": 2, + "nodeValue": "button", + }, + }, + ), ( + """ + document.querySelector("svg").attributes[0] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "localName": "foo", + "namespaceURI": "http://www.w3.org/2000/svg", + "nodeType": 2, + "nodeValue": "bar", + }, + }, + ), + ], ids=[ + "basic", + "namespaceURI", + ] +) +async def test_attribute_node(bidi_session, get_test_page, top_context, expression, expected): + await 
bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( + """ + document.querySelector("#with-text-node").childNodes[0] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 3, + "nodeValue": "Lorem", + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_text_node(bidi_session, get_test_page, top_context, expression, expected): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( + """ + document.querySelector("foo").childNodes[1] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 4, + "nodeValue": " < > & ", + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_cdata_node(bidi_session, inline, new_tab, expression, expected): + xml_page = inline("""CDATA section: & ]]>.""", doctype="xml") + + await bidi_session.browsing_context.navigate( + context=new_tab['context'], url=xml_page, wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(new_tab["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( + """ + document.createProcessingInstruction("xml-stylesheet", "href='foo.css'") + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 7, + "nodeValue": "href='foo.css'", + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_processing_instruction_node( + bidi_session, inline, new_tab, expression, expected +): + xml_page = inline("""""", doctype="xml") + + await bidi_session.browsing_context.navigate( + context=new_tab['context'], url=xml_page, wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(new_tab["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( + """ + document.querySelector("#with-comment").childNodes[0] + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 8, + "nodeValue": " Comment ", + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_comment_node(bidi_session, get_test_page, top_context, expression, expected): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( + """ + document + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 2, + "children": [{ + "type": "node", + 
"sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 10 + } + }, { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 2, + "localName": "html", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }], + "nodeType": 9 + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_document_node(bidi_session, get_test_page, top_context, expression, expected): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + serialization_options=SerializationOptions(max_dom_depth=1), + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( + """ + document.doctype + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "nodeType": 10, + } + } + ), + ], ids=[ + "basic", + ] +) +async def test_doctype_node(bidi_session, get_test_page, top_context, expression, expected): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( + """ + document.querySelector("#custom-element").shadowRoot; + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 1, + "children": [{ + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "in-shadow-dom"}, + "childNodeCount": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }], + "nodeType": 11 + } + } + ), + ( + """ + new DocumentFragment(); + """, + { + "type": "node", + "sharedId": any_string, + "value": { + "childNodeCount": 0, + "children": [], + "nodeType": 11, + } + } + ), + ], ids=[ + "shadowRoot", + "not connected" + ] +) +async def test_document_fragment_node( + bidi_session, get_test_page, top_context, expression, expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + serialization_options=SerializationOptions(max_dom_depth=1), + ) + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +async def test_node_within_object(bidi_session, get_test_page, top_context): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression="""({"elem": document.querySelector("img")})""", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + expected = { + "type": "object", + "value": [ + ["elem", { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "img", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }] + ] + } + + recursive_compare(expected, result) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "expression, expected", + [ + ( + "document.getElementsByTagName('img')", + 
{ + "type": "htmlcollection", + "value": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "img", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }, + ] + } + ), + ( + "document.querySelectorAll('img')", + { + "type": "nodelist", + "value": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "img", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1 + } + }, + ] + } + ), + ], ids=[ + "htmlcollection", + "nodelist" + ] +) +async def test_node_within_dom_collection( + bidi_session, + get_test_page, + top_context, + expression, + expected +): + await bidi_session.browsing_context.navigate( + context=top_context['context'], url=get_test_page(), wait="complete" + ) + + result = await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(top_context["context"]), + await_promise=False, + serialization_options=SerializationOptions(max_dom_depth=1), + ) + + recursive_compare(expected, result) + + +@pytest.mark.parametrize("shadow_root_mode", ["open", "closed"]) +@pytest.mark.asyncio +async def test_custom_element_with_shadow_root( + bidi_session, get_test_page, top_context, shadow_root_mode +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=get_test_page(shadow_root_mode=shadow_root_mode), + wait="complete", + ) + + result = await bidi_session.script.evaluate( + expression="""document.querySelector("#custom-element");""", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare({ + "type": "node", + "sharedId": any_string, + "value": { + "attributes": { + "id": "custom-element", + }, + "childNodeCount": 0, + "localName": "custom-element", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": { + "sharedId": any_string, + "type": "node", + "value": { + "childNodeCount": 1, + "mode": shadow_root_mode, + "nodeType": 11, + } + }, + } + }, result) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_ownership.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_ownership.py new file mode 100644 index 0000000000..ab018699eb --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_ownership.py @@ -0,0 +1,60 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException +from .. 
import assert_handle + + +@pytest.mark.asyncio +@pytest.mark.parametrize("result_ownership, should_contain_handle", + [("root", True), ("none", False), (None, False)]) +async def test_throw_exception(bidi_session, top_context, result_ownership, should_contain_handle): + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.evaluate( + expression='throw {a:1}', + await_promise=False, + result_ownership=result_ownership, + target=ContextTarget(top_context["context"])) + + assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("result_ownership, should_contain_handle", + [("root", True), ("none", False), (None, False)]) +async def test_invalid_script(bidi_session, top_context, result_ownership, should_contain_handle): + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.evaluate( + expression="))) !!@@## some invalid JS script (((", + await_promise=False, + result_ownership=result_ownership, + target=ContextTarget(top_context["context"])) + + assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("result_ownership, should_contain_handle", + [("root", True), ("none", False), (None, False)]) +async def test_rejected_promise(bidi_session, top_context, result_ownership, should_contain_handle): + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.evaluate( + expression="Promise.reject({a:1})", + await_promise=True, + result_ownership=result_ownership, + target=ContextTarget(top_context["context"])) + + assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("await_promise", [True, False]) +@pytest.mark.parametrize("result_ownership, should_contain_handle", + [("root", True), ("none", False), (None, False)]) +async def test_return_value(bidi_session, top_context, await_promise, result_ownership, should_contain_handle): + result = await bidi_session.script.evaluate( + expression="Promise.resolve({a: {b:1}})", + await_promise=await_promise, + result_ownership=result_ownership, + target=ContextTarget(top_context["context"])) + + assert_handle(result, should_contain_handle) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/sandbox.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/sandbox.py new file mode 100644 index 0000000000..3a6771780d --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/sandbox.py @@ -0,0 +1,199 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget, RealmTarget, ScriptEvaluateResultException + +from ... import any_int, any_string, recursive_compare +from .. 
import any_stack_trace + + +@pytest.mark.asyncio +async def test_sandbox(bidi_session, new_tab): + # Make changes in window + await bidi_session.script.evaluate( + expression="window.foo = 1", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + + # Check that changes are not present in sandbox + result_in_sandbox = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=True, + ) + assert result_in_sandbox == {"type": "undefined"} + + # Make changes in sandbox + await bidi_session.script.evaluate( + expression="window.bar = 1", + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=True, + ) + + # Make sure that changes are present in sandbox + result_in_sandbox = await bidi_session.script.evaluate( + expression="window.bar", + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=True, + ) + assert result_in_sandbox == {"type": "number", "value": 1} + + # Make sure that changes didn't leak from sandbox + result = await bidi_session.script.evaluate( + expression="window.bar", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + assert result == {"type": "undefined"} + + +@pytest.mark.asyncio +async def test_sandbox_with_empty_name(bidi_session, new_tab): + # An empty string as a `sandbox` means the default realm should be used. + await bidi_session.script.evaluate( + expression="window.foo = 'bar'", + target=ContextTarget(new_tab["context"], ""), + await_promise=True, + ) + + # Make sure that we can find the sandbox with the empty name. + result = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab["context"], ""), + await_promise=True, + ) + assert result == {"type": "string", "value": "bar"} + + # Make sure that we can find the value in the default realm. 
+ result = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + assert result == {"type": "string", "value": "bar"} + + +@pytest.mark.asyncio +async def test_switch_sandboxes(bidi_session, new_tab): + # Test that sandboxes are retained when switching between them + await bidi_session.script.evaluate( + expression="window.foo = 1", + target=ContextTarget(new_tab["context"], "sandbox_1"), + await_promise=True, + ) + await bidi_session.script.evaluate( + expression="window.foo = 2", + target=ContextTarget(new_tab["context"], "sandbox_2"), + await_promise=True, + ) + + result_in_sandbox_1 = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab["context"], "sandbox_1"), + await_promise=True, + ) + assert result_in_sandbox_1 == {"type": "number", "value": 1} + + result_in_sandbox_2 = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab["context"], "sandbox_2"), + await_promise=True, + ) + assert result_in_sandbox_2 == {"type": "number", "value": 2} + + +@pytest.mark.asyncio +async def test_sandbox_with_side_effects(bidi_session, new_tab): + # Make sure changing the node in sandbox will affect the other sandbox as well + await bidi_session.script.evaluate( + expression="document.querySelector('body').textContent = 'foo'", + target=ContextTarget(new_tab["context"], "sandbox_1"), + await_promise=True, + ) + expected_value = {"type": "string", "value": "foo"} + + result_in_sandbox_1 = await bidi_session.script.evaluate( + expression="document.querySelector('body').textContent", + target=ContextTarget(new_tab["context"], "sandbox_1"), + await_promise=True, + ) + assert result_in_sandbox_1 == expected_value + + result_in_sandbox_2 = await bidi_session.script.evaluate( + expression="document.querySelector('body').textContent", + target=ContextTarget(new_tab["context"], "sandbox_2"), + await_promise=True, + ) + assert result_in_sandbox_2 == expected_value + + +@pytest.mark.asyncio +async def test_sandbox_returns_same_node(bidi_session, new_tab): + node = await bidi_session.script.evaluate( + expression="document.querySelector('body')", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + recursive_compare({"type": "node", "sharedId": any_string}, node) + + node_sandbox = await bidi_session.script.evaluate( + expression="document.querySelector('body')", + target=ContextTarget(new_tab["context"], sandbox="sandbox_1"), + await_promise=True, + ) + assert node_sandbox == node + + +@pytest.mark.asyncio +@pytest.mark.parametrize("await_promise", [True, False]) +async def test_exception_details(bidi_session, new_tab, await_promise): + if await_promise: + expression = "Promise.reject(1)" + else: + expression = "throw 1" + + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.evaluate( + expression=expression, + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=await_promise, + ) + + recursive_compare( + { + "realm": any_string, + "exceptionDetails": { + "columnNumber": any_int, + "exception": {"type": "number", "value": 1}, + "lineNumber": any_int, + "stackTrace": any_stack_trace, + "text": any_string, + }, + }, + exception.value.result, + ) + + +@pytest.mark.asyncio +async def test_target_realm(bidi_session, top_context, default_realm): + result = await bidi_session.script.evaluate( + raw_result=True, + expression="window.foo = 3", + 
target=ContextTarget(top_context["context"], "sandbox"), + await_promise=True, + ) + realm = result["realm"] + + # Make sure that sandbox realm id is different from default + assert realm != default_realm + + result = await bidi_session.script.evaluate( + raw_result=True, + expression="window.foo", + target=RealmTarget(realm), + await_promise=True, + ) + + recursive_compare( + {"realm": realm, "result": {"type": "number", "value": 3}}, result + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/serialization_options.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/serialization_options.py new file mode 100644 index 0000000000..73df588341 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/serialization_options.py @@ -0,0 +1,444 @@ +import pytest +from webdriver.bidi.modules.script import ContextTarget, SerializationOptions + +from ... import any_string, recursive_compare + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize( + "include_shadow_tree, shadow_root_mode, expected", + [ + ( + None, + "open", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + None, + "closed", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + "none", + "open", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + "none", + "closed", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + "open", + "open", + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 11, + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 1, + "attributes": {"id": "in-shadow-dom"}, + "shadowRoot": None, + }, + } + ], + "mode": "open", + }, + }, + ), + ( + "open", + "closed", + { + "type": "node", + "sharedId": any_string, + "value": {"nodeType": 11, "childNodeCount": 1}, + }, + ), + ( + "all", + "open", + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 11, + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 1, + "attributes": {"id": "in-shadow-dom"}, + "shadowRoot": None, + }, + } + ], + "mode": "open", + }, + }, + ), + ( + "all", + "closed", + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 11, + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 1, + "attributes": {"id": "in-shadow-dom"}, + "shadowRoot": None, + }, + } + ], + "mode": "closed", + }, + }, + ), + ], + ids=[ + "default mode for open shadow root", + "default mode for closed shadow root", + "'none' mode for open shadow root", + "'none' mode for closed shadow root", + "'open' mode for open shadow root", + "'open' mode for closed shadow root", + "'all' mode for open shadow root", + "'all' mode for closed shadow root", + ], +) +async def test_include_shadow_tree( + bidi_session, + top_context, + get_test_page, + include_shadow_tree, + shadow_root_mode, + expected, +): + await bidi_session.browsing_context.navigate( + 
context=top_context["context"], + url=get_test_page(shadow_root_mode=shadow_root_mode), + wait="complete", + ) + result = await bidi_session.script.evaluate( + expression="""document.querySelector("custom-element")""", + target=ContextTarget(top_context["context"]), + await_promise=True, + serialization_options=SerializationOptions( + include_shadow_tree=include_shadow_tree, max_dom_depth=1 + ), + ) + + recursive_compare(expected, result["value"]["shadowRoot"]) + + +@pytest.mark.parametrize( + "max_dom_depth, expected", + [ + ( + None, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + "childNodeCount": 2, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ), + ( + 0, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + "childNodeCount": 2, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ), + ( + 1, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + "childNodeCount": 2, + "children": [ + { + "sharedId": any_string, + "type": "node", + "value": { + "attributes": {}, + "childNodeCount": 1, + "localName": "p", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + { + "sharedId": any_string, + "type": "node", + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ), + ( + 2, + { + "type": "node", + "sharedId": any_string, + "value": { + "attributes": {"id": "with-children"}, + "childNodeCount": 2, + "children": [ + { + "sharedId": any_string, + "type": "node", + "value": { + "attributes": {}, + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "span", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 0, + "attributes": {}, + "shadowRoot": None, + }, + } + ], + "localName": "p", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + { + "sharedId": any_string, + "type": "node", + "value": { + "attributes": {}, + "childNodeCount": 0, + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ], + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "nodeType": 1, + "shadowRoot": None, + }, + }, + ), + ], +) +async def test_max_dom_depth( + bidi_session, top_context, get_test_page, max_dom_depth, expected +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=get_test_page(), wait="complete" + ) + result = await bidi_session.script.evaluate( + expression="""document.querySelector("div#with-children")""", + target=ContextTarget(top_context["context"]), + await_promise=True, + serialization_options=SerializationOptions(max_dom_depth=max_dom_depth), + ) + + recursive_compare(expected, result) + + +async def test_max_dom_depth_null( + bidi_session, + send_blocking_command, + top_context, + get_test_page, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=get_test_page(), wait="complete" + ) + result = await 
send_blocking_command( + "script.evaluate", + { + "expression": """document.querySelector("div#with-children")""", + "target": ContextTarget(top_context["context"]), + "awaitPromise": True, + "serializationOptions": {"maxDomDepth": None}, + }, + ) + + recursive_compare( + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "div", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 2, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "p", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 1, + "children": [ + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "span", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 0, + "children": [], + "attributes": {}, + "shadowRoot": None, + }, + } + ], + "attributes": {}, + "shadowRoot": None, + }, + }, + { + "type": "node", + "sharedId": any_string, + "value": { + "nodeType": 1, + "localName": "br", + "namespaceURI": "http://www.w3.org/1999/xhtml", + "childNodeCount": 0, + "children": [], + "attributes": {}, + "shadowRoot": None, + }, + }, + ], + "attributes": {"id": "with-children"}, + "shadowRoot": None, + }, + }, + result["result"], + ) + + +@pytest.mark.parametrize( + "max_object_depth, expected", + [ + ( + None, + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "array", "value": [{"type": "number", "value": 2}]}, + ], + }, + ), + (0, {"type": "array"}), + ( + 1, + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "array"}, + ], + }, + ), + ( + 2, + { + "type": "array", + "value": [ + {"type": "number", "value": 1}, + {"type": "array", "value": [{"type": "number", "value": 2}]}, + ], + }, + ), + ], +) +async def test_max_object_depth(bidi_session, top_context, max_object_depth, expected): + result = await bidi_session.script.evaluate( + expression="[1, [2]]", + target=ContextTarget(top_context["context"]), + await_promise=True, + serialization_options=SerializationOptions(max_object_depth=max_object_depth), + ) + + assert result == expected diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/strict_mode.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/strict_mode.py new file mode 100644 index 0000000000..bcaebb51f4 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/strict_mode.py @@ -0,0 +1,34 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException +from ... import any_int, any_string, recursive_compare +from .. import any_stack_trace, specific_error_response + + +@pytest.mark.asyncio +async def test_strict_mode(bidi_session, top_context): + # As long as there is no `SOME_VARIABLE`, the command should fail in strict mode. + with pytest.raises(ScriptEvaluateResultException) as exception: + await bidi_session.script.evaluate( + expression="'use strict';SOME_VARIABLE=1", + target=ContextTarget(top_context["context"]), + await_promise=True) + recursive_compare(specific_error_response({"type": "error"}), exception.value.result) + + # In non-strict mode, the command should succeed and global `SOME_VARIABLE` should be created. 
+ result = await bidi_session.script.evaluate( + expression="SOME_VARIABLE=1", + target=ContextTarget(top_context["context"]), + await_promise=True) + assert result == { + "type": "number", + "value": 1} + + # Access created by the previous command `SOME_VARIABLE`. + result = await bidi_session.script.evaluate( + expression="'use strict';SOME_VARIABLE=1", + target=ContextTarget(top_context["context"]), + await_promise=True) + assert result == { + "type": "number", + "value": 1} diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/context.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/context.py new file mode 100644 index 0000000000..1d765c7b4a --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/context.py @@ -0,0 +1,70 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget + +from ... import recursive_compare + + +@pytest.mark.asyncio +async def test_context( + bidi_session, + test_alt_origin, + test_origin, + test_page_cross_origin_frame, +): + new_context = await bidi_session.browsing_context.create(type_hint="tab") + await bidi_session.browsing_context.navigate( + context=new_context["context"], + url=test_page_cross_origin_frame, + wait="complete", + ) + + # Evaluate to get realm id + new_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(new_context["context"]), + await_promise=False, + ) + + result = await bidi_session.script.get_realms(context=new_context["context"]) + + recursive_compare( + [ + { + "context": new_context["context"], + "origin": test_origin, + "realm": new_context_result["realm"], + "type": "window", + }, + ], + result, + ) + + contexts = await bidi_session.browsing_context.get_tree(root=new_context["context"]) + assert len(contexts) == 1 + frames = contexts[0]["children"] + assert len(frames) == 1 + frame_context = frames[0]["context"] + + # Evaluate to get realm id + frame_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(frame_context), + await_promise=False, + ) + + result = await bidi_session.script.get_realms(context=frame_context) + + recursive_compare( + [ + { + "context": frame_context, + "origin": test_alt_origin, + "realm": frame_context_result["realm"], + "type": "window", + }, + ], + result, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/get_realms.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/get_realms.py new file mode 100644 index 0000000000..4dfce5ab49 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/get_realms.py @@ -0,0 +1,183 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget + +from ... 
import any_string, recursive_compare + +PAGE_ABOUT_BLANK = "about:blank" + + +@pytest.mark.asyncio +async def test_payload_types(bidi_session): + result = await bidi_session.script.get_realms() + + recursive_compare( + [ + { + "context": any_string, + "origin": any_string, + "realm": any_string, + "type": any_string, + } + ], + result, + ) + + +@pytest.mark.asyncio +async def test_realm_is_consistent_when_calling_twice(bidi_session): + result = await bidi_session.script.get_realms() + + result_calling_again = await bidi_session.script.get_realms() + + assert result[0]["realm"] == result_calling_again[0]["realm"] + + +@pytest.mark.asyncio +async def test_realm_is_different_after_reload(bidi_session, top_context): + result = await bidi_session.script.get_realms() + + # Reload the page + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete" + ) + + result_after_reload = await bidi_session.script.get_realms() + + assert result[0]["realm"] != result_after_reload[0]["realm"] + + +@pytest.mark.asyncio +@pytest.mark.parametrize("type_hint", ["tab", "window"]) +async def test_multiple_top_level_contexts(bidi_session, top_context, type_hint): + new_context = await bidi_session.browsing_context.create(type_hint=type_hint) + result = await bidi_session.script.get_realms() + + # Evaluate to get realm ids + top_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + new_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(new_context["context"]), + await_promise=False, + ) + + recursive_compare( + [ + { + "context": top_context["context"], + "origin": "null", + "realm": top_context_result["realm"], + "type": "window", + }, + { + "context": new_context["context"], + "origin": "null", + "realm": new_context_result["realm"], + "type": "window", + }, + ], + result, + ) + + +@pytest.mark.asyncio +async def test_iframes( + bidi_session, + top_context, + test_alt_origin, + test_origin, + test_page_cross_origin_frame, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], + url=test_page_cross_origin_frame, + wait="complete", + ) + + result = await bidi_session.script.get_realms() + + # Evaluate to get realm id + top_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + assert len(contexts) == 1 + frames = contexts[0]["children"] + assert len(frames) == 1 + frame_context = frames[0]["context"] + + # Evaluate to get realm id + frame_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(frame_context), + await_promise=False, + ) + + recursive_compare( + [ + { + "context": top_context["context"], + "origin": test_origin, + "realm": top_context_result["realm"], + "type": "window", + }, + { + "context": frame_context, + "origin": test_alt_origin, + "realm": frame_context_result["realm"], + "type": "window", + }, + ], + result, + ) + + # Clean up origin + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete" + ) + + +@pytest.mark.asyncio +async def test_origin(bidi_session, inline, top_context, test_origin): + url = 
inline("

foo
") + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + result = await bidi_session.script.get_realms() + + # Evaluate to get realm id + top_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare( + [ + { + "context": top_context["context"], + "origin": test_origin, + "realm": top_context_result["realm"], + "type": "window", + } + ], + result, + ) + + # Clean up origin + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete" + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/invalid.py new file mode 100644 index 0000000000..c15378a6e0 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/invalid.py @@ -0,0 +1,26 @@ +import pytest +import webdriver.bidi.error as error + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("context", [False, 42, {}, []]) +async def test_params_context_invalid_type(bidi_session, context): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.get_realms(context=context) + + +async def test_params_context_invalid_value(bidi_session): + with pytest.raises(error.NoSuchFrameException): + await bidi_session.script.get_realms(context="foo") + + +@pytest.mark.parametrize("type", [False, 42, {}, []]) +async def test_params_type_invalid_type(bidi_session, type): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.get_realms(type=type) + + +async def test_params_type_invalid_value(bidi_session): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.get_realms(type="foo") diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/sandbox.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/sandbox.py new file mode 100644 index 0000000000..6ce1fee552 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/sandbox.py @@ -0,0 +1,238 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget + +from ... import recursive_compare + +PAGE_ABOUT_BLANK = "about:blank" + + +@pytest.mark.asyncio +async def test_sandbox(bidi_session, top_context): + evaluate_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + # Create a sandbox + evaluate_in_sandbox_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"], "sandbox"), + await_promise=False, + ) + + result = await bidi_session.script.get_realms() + + recursive_compare( + [ + { + "context": top_context["context"], + "origin": "null", + "realm": evaluate_result["realm"], + "type": "window", + }, + { + "context": top_context["context"], + "origin": "null", + "realm": evaluate_in_sandbox_result["realm"], + "sandbox": "sandbox", + "type": "window", + }, + ], + result, + ) + + # Reload to clean up sandboxes + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete" + ) + + +@pytest.mark.asyncio +async def test_origin(bidi_session, inline, top_context, test_origin): + url = inline("
<div>foo</div>
") + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=url, wait="complete" + ) + + evaluate_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + # Create a sandbox + evaluate_in_sandbox_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"], "sandbox"), + await_promise=False, + ) + + result = await bidi_session.script.get_realms() + + recursive_compare( + [ + { + "context": top_context["context"], + "origin": test_origin, + "realm": evaluate_result["realm"], + "type": "window", + }, + { + "context": top_context["context"], + "origin": test_origin, + "realm": evaluate_in_sandbox_result["realm"], + "sandbox": "sandbox", + "type": "window", + }, + ], + result, + ) + + # Reload to clean up sandboxes + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete" + ) + + +@pytest.mark.asyncio +async def test_type(bidi_session, top_context): + evaluate_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + # Create a sandbox + evaluate_in_sandbox_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"], "sandbox"), + await_promise=False, + ) + + # Should be extended when more types are supported + result = await bidi_session.script.get_realms(type="window") + + recursive_compare( + [ + { + "context": top_context["context"], + "origin": "null", + "realm": evaluate_result["realm"], + "type": "window", + }, + { + "context": top_context["context"], + "origin": "null", + "realm": evaluate_in_sandbox_result["realm"], + "sandbox": "sandbox", + "type": "window", + }, + ], + result, + ) + + # Reload to clean up sandboxes + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete" + ) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("type_hint", ["tab", "window"]) +async def test_multiple_top_level_contexts( + bidi_session, + test_alt_origin, + test_origin, + test_page_cross_origin_frame, + type_hint, +): + new_context = await bidi_session.browsing_context.create(type_hint=type_hint) + await bidi_session.browsing_context.navigate( + context=new_context["context"], + url=test_page_cross_origin_frame, + wait="complete", + ) + + evaluate_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(new_context["context"]), + await_promise=False, + ) + + # Create a sandbox + evaluate_in_sandbox_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(new_context["context"], "sandbox"), + await_promise=False, + ) + + result = await bidi_session.script.get_realms(context=new_context["context"]) + recursive_compare( + [ + { + "context": new_context["context"], + "origin": test_origin, + "realm": evaluate_result["realm"], + "type": "window", + }, + { + "context": new_context["context"], + "origin": test_origin, + "realm": evaluate_in_sandbox_result["realm"], + "sandbox": "sandbox", + "type": "window", + }, + ], + result, + ) + + contexts = await bidi_session.browsing_context.get_tree(root=new_context["context"]) + assert len(contexts) == 1 + frames = contexts[0]["children"] + assert len(frames) == 
1 + frame_context = frames[0]["context"] + + evaluate_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(frame_context), + await_promise=False, + ) + + # Create a sandbox in iframe + evaluate_in_sandbox_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(frame_context, "sandbox"), + await_promise=False, + ) + + result = await bidi_session.script.get_realms(context=frame_context) + recursive_compare( + [ + { + "context": frame_context, + "origin": test_alt_origin, + "realm": evaluate_result["realm"], + "type": "window", + }, + { + "context": frame_context, + "origin": test_alt_origin, + "realm": evaluate_in_sandbox_result["realm"], + "sandbox": "sandbox", + "type": "window", + }, + ], + result, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/type.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/type.py new file mode 100644 index 0000000000..7a8b4d43b7 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/type.py @@ -0,0 +1,34 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget + +from ... import recursive_compare + +PAGE_ABOUT_BLANK = "about:blank" + + +@pytest.mark.asyncio +# Should be extended when more types are supported +@pytest.mark.parametrize("type", ["window"]) +async def test_type(bidi_session, top_context, type): + result = await bidi_session.script.get_realms(type=type) + + # Evaluate to get realm id + top_context_result = await bidi_session.script.evaluate( + raw_result=True, + expression="1 + 2", + target=ContextTarget(top_context["context"]), + await_promise=False, + ) + + recursive_compare( + [ + { + "context": top_context["context"], + "origin": "null", + "realm": top_context_result["realm"], + "type": type, + } + ], + result, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/message/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/message/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/message/message.py b/testing/web-platform/tests/webdriver/tests/bidi/script/message/message.py new file mode 100644 index 0000000000..4270dcc292 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/message/message.py @@ -0,0 +1,101 @@ +import pytest +from tests.support.sync import AsyncPoll + +from webdriver.bidi.modules.script import ContextTarget +from webdriver.error import TimeoutException + + +pytestmark = pytest.mark.asyncio + +MESSAGE_EVENT = "script.message" + + +async def test_unsubscribe(bidi_session, top_context): + await bidi_session.session.subscribe(events=[MESSAGE_EVENT]) + await bidi_session.session.unsubscribe(events=[MESSAGE_EVENT]) + + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener(MESSAGE_EVENT, on_event) + + await bidi_session.script.call_function( + raw_result=True, + function_declaration="(channel) => channel('foo')", + arguments=[{"type": "channel", "value": {"channel": "channel_name"}}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + assert len(events) == 0 + + remove_listener() + + +async def test_subscribe(bidi_session, subscribe_events, top_context, wait_for_event): + await subscribe_events(events=[MESSAGE_EVENT]) + + on_script_message = wait_for_event(MESSAGE_EVENT) + result = await 
bidi_session.script.call_function( + raw_result=True, + function_declaration="(channel) => channel('foo')", + arguments=[{"type": "channel", "value": {"channel": "channel_name"}}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + event = await on_script_message + + assert event == { + "channel": "channel_name", + "data": {"type": "string", "value": "foo"}, + "source": { + "realm": result["realm"], + "context": top_context["context"], + }, + } + + +async def test_subscribe_to_one_context( + bidi_session, subscribe_events, top_context, new_tab +): + # Subscribe to a specific context + await subscribe_events( + events=[MESSAGE_EVENT], contexts=[top_context["context"]] + ) + + # Track all received script.message events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener(MESSAGE_EVENT, on_event) + + # Send the event in the other context + await bidi_session.script.call_function( + raw_result=True, + function_declaration="(channel) => channel('foo')", + arguments=[{"type": "channel", "value": {"channel": "channel_name"}}], + await_promise=False, + target=ContextTarget(new_tab["context"]), + ) + + # Make sure we didn't receive the event for the new tab + wait = AsyncPoll(bidi_session, timeout=0.5) + with pytest.raises(TimeoutException): + await wait.until(lambda _: len(events) > 0) + + await bidi_session.script.call_function( + raw_result=True, + function_declaration="(channel) => channel('foo')", + arguments=[{"type": "channel", "value": {"channel": "channel_name"}}], + await_promise=False, + target=ContextTarget(top_context["context"]), + ) + + # Make sure we received the event for the right context + assert len(events) == 1 + + remove_listener() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/invalid.py new file mode 100644 index 0000000000..f32c5f57ca --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/invalid.py @@ -0,0 +1,15 @@ +import pytest +import webdriver.bidi.error as error + +pytestmark = pytest.mark.asyncio + + +@pytest.mark.parametrize("script", [None, False, 42, {}, []]) +async def test_params_script_invalid_type(bidi_session, script): + with pytest.raises(error.InvalidArgumentException): + await bidi_session.script.remove_preload_script(script=script), + + +async def test_params_script_invalid_value(bidi_session): + with pytest.raises(error.NoSuchScriptException): + await bidi_session.script.remove_preload_script(script="foo"), diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/remove_preload_script.py b/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/remove_preload_script.py new file mode 100644 index 0000000000..b92fb31af0 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/remove_preload_script.py @@ -0,0 +1,80 @@ +import pytest +import webdriver.bidi.error as error + +from webdriver.bidi.modules.script import ContextTarget + + +@pytest.mark.asyncio +@pytest.mark.parametrize("type_hint", ["tab", "window"]) +async def 
test_remove_preload_script(bidi_session, type_hint): + script = await bidi_session.script.add_preload_script( + function_declaration="() => { window.foo='bar'; }" + ) + + new_context = await bidi_session.browsing_context.create(type_hint=type_hint) + + result = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_context["context"]), + await_promise=True, + ) + assert result == {"type": "string", "value": "bar"} + + await bidi_session.script.remove_preload_script(script=script) + + new_tab_2 = await bidi_session.browsing_context.create(type_hint=type_hint) + + # Check that changes from preload script were not applied after script was removed + result_2 = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab_2["context"]), + await_promise=True, + ) + assert result_2 == {"type": "undefined"} + + +@pytest.mark.asyncio +async def test_remove_preload_script_twice(bidi_session): + script = await bidi_session.script.add_preload_script( + function_declaration="() => { window.foo='bar'; }" + ) + + await bidi_session.script.remove_preload_script(script=script) + + # Check that we can not remove the same script twice + with pytest.raises(error.NoSuchScriptException): + await bidi_session.script.remove_preload_script(script=script) + + +@pytest.mark.asyncio +async def test_remove_one_of_preload_scripts(bidi_session): + script_1 = await bidi_session.script.add_preload_script( + function_declaration="() => { window.bar='foo'; }" + ) + script_2 = await bidi_session.script.add_preload_script( + function_declaration="() => { window.baz='bar'; }" + ) + + # Remove one of the scripts + await bidi_session.script.remove_preload_script(script=script_1) + + new_tab = await bidi_session.browsing_context.create(type_hint="tab") + + # Check that the first script didn't run + result = await bidi_session.script.evaluate( + expression="window.bar", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + assert result == {"type": "undefined"} + + # Check that the second script still applied the changes to the window + result_2 = await bidi_session.script.evaluate( + expression="window.baz", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + assert result_2 == {"type": "string", "value": "bar"} + + # Clean up the second script + await bidi_session.script.remove_preload_script(script=script_2) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/sandbox.py b/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/sandbox.py new file mode 100644 index 0000000000..32befe7f05 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/script/remove_preload_script/sandbox.py @@ -0,0 +1,42 @@ +import pytest + +from webdriver.bidi.modules.script import ContextTarget + + +@pytest.mark.asyncio +async def test_remove_preload_script_from_sandbox(bidi_session): + # Add preload script to make changes in window + script_1 = await bidi_session.script.add_preload_script( + function_declaration="() => { window.foo = 1; }", + ) + # Add preload script to make changes in sandbox + script_2 = await bidi_session.script.add_preload_script( + function_declaration="() => { window.bar = 2; }", sandbox="sandbox" + ) + + # Remove first preload script + await bidi_session.script.remove_preload_script( + script=script_1, + ) + # Remove second preload script + await bidi_session.script.remove_preload_script( + script=script_2, + ) + + new_tab = await 
bidi_session.browsing_context.create(type_hint="tab") + + # Make sure that changes from first preload script were not applied + result_in_window = await bidi_session.script.evaluate( + expression="window.foo", + target=ContextTarget(new_tab["context"]), + await_promise=True, + ) + assert result_in_window == {"type": "undefined"} + + # Make sure that changes from second preload script were not applied + result_in_sandbox = await bidi_session.script.evaluate( + expression="window.bar", + target=ContextTarget(new_tab["context"], "sandbox"), + await_promise=True, + ) + assert result_in_sandbox == {"type": "undefined"} diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/new/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/new/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/new/connect.py b/testing/web-platform/tests/webdriver/tests/bidi/session/new/connect.py new file mode 100644 index 0000000000..7118f77ea0 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/session/new/connect.py @@ -0,0 +1,34 @@ +import pytest +import websockets + +import webdriver + +# classic session to enable bidi capability manually +# Intended to be the first test in this file +@pytest.mark.asyncio +@pytest.mark.capabilities({"webSocketUrl": True}) +async def test_websocket_url_connect(session): + websocket_url = session.capabilities["webSocketUrl"] + async with websockets.connect(websocket_url) as websocket: + await websocket.send("Hello world!") + +# test bidi_session send +@pytest.mark.asyncio +async def test_bidi_session_send(bidi_session, send_blocking_command): + await send_blocking_command("session.status", {}) + +# bidi session following a bidi session with a different capabilities +# to test session recreation +@pytest.mark.asyncio +@pytest.mark.capabilities({"acceptInsecureCerts": True}) +async def test_bidi_session_with_different_capability(bidi_session, + send_blocking_command): + await send_blocking_command("session.status", {}) + +# classic session following a bidi session to test session +# recreation +# Intended to be the last test in this file to make sure +# classic session is not impacted by bidi tests +@pytest.mark.asyncio +def test_classic_after_bidi_session(session): + assert not isinstance(session, webdriver.bidi.BidiSession) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/status/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/status/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/status/status.py b/testing/web-platform/tests/webdriver/tests/bidi/session/status/status.py new file mode 100644 index 0000000000..eee102fee7 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/session/status/status.py @@ -0,0 +1,11 @@ +import pytest + + +# Check that session.status can be used. The actual values for the "ready" and +# "message" properties are implementation specific. 
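# Illustrative sketch (assumption, not part of the upstream test file): the raw
# exchange that send_blocking_command("session.status", {}) performs can be
# reproduced with a plain WebSocket client, connecting to the webSocketUrl
# capability exactly as connect.py does above.
import json

import websockets


async def raw_session_status(websocket_url):
    async with websockets.connect(websocket_url) as websocket:
        # Every BiDi command is a JSON object with an id, a method and params.
        await websocket.send(
            json.dumps({"id": 1, "method": "session.status", "params": {}})
        )
        # The success response echoes the id and carries the result payload;
        # only the types of "ready" and "message" are guaranteed by the spec.
        response = json.loads(await websocket.recv())
        return response["result"]["ready"], response["result"]["message"]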
+@pytest.mark.asyncio +async def test_bidi_session_status(bidi_session, send_blocking_command): + response = await send_blocking_command("session.status", {}) + assert isinstance(response["ready"], bool) + assert isinstance(response["message"], str) + diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/contexts.py b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/contexts.py new file mode 100644 index 0000000000..874acf63d7 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/contexts.py @@ -0,0 +1,277 @@ +import asyncio + +import pytest + +from ... import create_console_api_message, recursive_compare + +# The basic use case of subscribing to all contexts for a single event +# is covered by tests for each event in the dedicated folders. + + +@pytest.mark.asyncio +async def test_subscribe_to_one_context( + bidi_session, subscribe_events, top_context, new_tab, wait_for_event +): + # Subscribe for log events to a specific context + await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]]) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger console event in the another context + await create_console_api_message(bidi_session, new_tab, "text1") + + assert len(events) == 0 + + # Trigger another console event in the observed context + on_entry_added = wait_for_event("log.entryAdded") + expected_text = await create_console_api_message(bidi_session, top_context, "text2") + await on_entry_added + + assert len(events) == 1 + recursive_compare( + { + "text": expected_text, + }, + events[0], + ) + + remove_listener() + + +@pytest.mark.asyncio +async def test_subscribe_to_one_context_twice( + bidi_session, subscribe_events, top_context, wait_for_event +): + # Subscribe twice for log events to a specific context + await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]]) + await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]]) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger a console event in the observed context + on_entry_added = wait_for_event("log.entryAdded") + expected_text = await create_console_api_message(bidi_session, top_context, "text2") + await on_entry_added + + assert len(events) == 1 + recursive_compare( + { + "text": expected_text, + }, + events[0], + ) + + assert len(events) == 1 + + remove_listener() + + +@pytest.mark.asyncio +async def test_subscribe_to_one_context_and_then_to_all( + bidi_session, subscribe_events, top_context, new_tab, wait_for_event +): + # Subscribe for log events to a specific context + await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]]) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger console event in the 
another context + buffered_event_expected_text = await create_console_api_message( + bidi_session, new_tab, "text1" + ) + + assert len(events) == 0 + + # Trigger another console event in the observed context + on_entry_added = wait_for_event("log.entryAdded") + expected_text = await create_console_api_message(bidi_session, top_context, "text2") + await on_entry_added + + assert len(events) == 1 + recursive_compare( + { + "text": expected_text, + }, + events[0], + ) + + events = [] + + # Subscribe to all contexts + await subscribe_events(events=["log.entryAdded"]) + + # Check that we received the buffered event + assert len(events) == 1 + recursive_compare( + { + "text": buffered_event_expected_text, + }, + events[0], + ) + + # Trigger again events in each context + expected_text = await create_console_api_message(bidi_session, new_tab, "text3") + await on_entry_added + + assert len(events) == 2 + recursive_compare( + { + "text": expected_text, + }, + events[1], + ) + + expected_text = await create_console_api_message(bidi_session, top_context, "text4") + await on_entry_added + + assert len(events) == 3 + recursive_compare( + { + "text": expected_text, + }, + events[2], + ) + + remove_listener() + + +@pytest.mark.asyncio +async def test_subscribe_to_all_context_and_then_to_one_again( + bidi_session, subscribe_events, top_context, new_tab, wait_for_event +): + # Subscribe to all contexts + await subscribe_events(events=["log.entryAdded"]) + # Subscribe to one of the contexts again + await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]]) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger console event in the context to which we tried to subscribe twice + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message(bidi_session, top_context, "text1") + await on_entry_added + + # Make sure we received only one event + assert len(events) == 1 + + remove_listener() + + +@pytest.mark.asyncio +async def test_subscribe_to_top_context_with_iframes( + bidi_session, + subscribe_events, + wait_for_event, + top_context, + test_page_multiple_frames, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_multiple_frames, wait="complete" + ) + + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + + assert len(contexts[0]["children"]) == 2 + frame_1 = contexts[0]["children"][0] + frame_2 = contexts[0]["children"][1] + + # Subscribe to the top context + await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]]) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger console event in the first iframe + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message(bidi_session, frame_1, "text1") + await on_entry_added + + # Make sure we received the event + assert len(events) == 1 + + # Trigger console event in the second iframe + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message(bidi_session, frame_2, "text2") + await on_entry_added + + # Make sure we received the second event as well + assert len(events) == 2 + + remove_listener() + + 
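# Illustrative sketch (assumption, not part of the upstream test file): the
# listener setup shared by the surrounding tests, reduced to a helper. The
# test above relies on a subscription to a top-level context also delivering
# events from its iframes; the next test checks subscribing via a child frame.
async def collect_log_entries_for_context(bidi_session, subscribe_events, context_id):
    """Subscribe to log.entryAdded for one context and collect incoming events."""
    await subscribe_events(events=["log.entryAdded"], contexts=[context_id])

    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    # The caller triggers console messages and calls remove_listener() when done.
    return events, remove_listener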
+@pytest.mark.asyncio +async def test_subscribe_to_child_context( + bidi_session, + subscribe_events, + wait_for_event, + top_context, + test_page_multiple_frames, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_multiple_frames, wait="complete" + ) + + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + + assert len(contexts[0]["children"]) == 2 + frame_1 = contexts[0]["children"][0] + frame_2 = contexts[0]["children"][1] + + # Subscribe to the first frame context + await subscribe_events(events=["log.entryAdded"], contexts=[frame_1["context"]]) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger console event in the top context + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message(bidi_session, top_context, "text1") + await on_entry_added + + # Make sure we received the event + assert len(events) == 1 + + # Trigger console event in the second iframe + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message(bidi_session, frame_2, "text2") + await on_entry_added + + # Make sure we received the second event as well + assert len(events) == 2 + + remove_listener() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/events.py b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/events.py new file mode 100644 index 0000000000..a4c20365b3 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/events.py @@ -0,0 +1,138 @@ +import asyncio + +import pytest + +# The basic use case of subscribing globally for a single event +# is covered by tests for each event in the dedicated folders. 
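# Illustrative sketch (assumption, not part of the upstream test file): the
# pattern shared by the tests below, i.e. listen for every browsingContext
# event, open a new tab and wait for browsingContext.load before counting the
# events that were delivered.
async def create_tab_and_collect_event_names(bidi_session, wait_for_event):
    events = []

    async def on_event(method, data):
        events.append(method)

    removers = [
        bidi_session.add_event_listener(name, on_event)
        for name in (
            "browsingContext.contextCreated",
            "browsingContext.domContentLoaded",
            "browsingContext.load",
        )
    ]

    # browsingContext.load is emitted last for a new tab, so once it arrives
    # any earlier events covered by the subscription have been delivered too.
    on_load = wait_for_event("browsingContext.load")
    await bidi_session.browsing_context.create(type_hint="tab")
    await on_load

    for remove_listener in removers:
        remove_listener()

    return events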
+ + +@pytest.mark.asyncio +async def test_subscribe_to_module(bidi_session, subscribe_events, wait_for_event): + # Subscribe to all browsing context events + await subscribe_events(events=["browsingContext"]) + + # Track all received browsing context events in the events array + events = [] + + async def on_event(method, data): + events.append(method) + + remove_listener_contextCreated = bidi_session.add_event_listener( + "browsingContext.contextCreated", on_event + ) + remove_listener_domContentLoaded = bidi_session.add_event_listener( + "browsingContext.domContentLoaded", on_event + ) + remove_listener_load = bidi_session.add_event_listener( + "browsingContext.load", on_event + ) + + # Wait for the last event + on_entry_added = wait_for_event("browsingContext.load") + await bidi_session.browsing_context.create(type_hint="tab") + await on_entry_added + + assert len(events) == 3 + + remove_listener_contextCreated() + remove_listener_domContentLoaded() + remove_listener_load() + + +@pytest.mark.asyncio +async def test_subscribe_to_one_event_and_then_to_module( + bidi_session, subscribe_events, wait_for_event +): + # Subscribe to one event + await subscribe_events(events=["browsingContext.contextCreated"]) + + # Track all received browsing context events in the events array + events = [] + + async def on_event(method, data): + events.append(method) + + remove_listener_contextCreated = bidi_session.add_event_listener( + "browsingContext.contextCreated", on_event + ) + + on_entry_added = wait_for_event("browsingContext.contextCreated") + await bidi_session.browsing_context.create(type_hint="tab") + await on_entry_added + + assert len(events) == 1 + assert "browsingContext.contextCreated" in events + + # Subscribe to all browsing context events + await subscribe_events(events=["browsingContext"]) + + # Clean up the event list + events = [] + + remove_listener_domContentLoaded = bidi_session.add_event_listener( + "browsingContext.domContentLoaded", on_event + ) + remove_listener_load = bidi_session.add_event_listener( + "browsingContext.load", on_event + ) + + # Wait for the last event + on_entry_added = wait_for_event("browsingContext.load") + await bidi_session.browsing_context.create(type_hint="tab") + await on_entry_added + + # Make sure we didn't receive duplicates + assert len(events) == 3 + + remove_listener_contextCreated() + remove_listener_domContentLoaded() + remove_listener_load() + + +@pytest.mark.asyncio +async def test_subscribe_to_module_and_then_to_one_event_again( + bidi_session, subscribe_events, wait_for_event +): + # Subscribe to all browsing context events + await subscribe_events(events=["browsingContext"]) + + # Track all received browsing context events in the events array + events = [] + + async def on_event(method, data): + events.append(method) + + remove_listener_contextCreated = bidi_session.add_event_listener( + "browsingContext.contextCreated", on_event + ) + remove_listener_domContentLoaded = bidi_session.add_event_listener( + "browsingContext.domContentLoaded", on_event + ) + remove_listener_load = bidi_session.add_event_listener( + "browsingContext.load", on_event + ) + + # Wait for the last event + on_entry_added = wait_for_event("browsingContext.load") + await bidi_session.browsing_context.create(type_hint="tab") + await on_entry_added + + assert len(events) == 3 + + # Subscribe to one event again + await subscribe_events(events=["browsingContext.contextCreated"]) + + # Clean up the event list + events = [] + + # Wait for the last event + on_entry_added = 
wait_for_event("browsingContext.load") + await bidi_session.browsing_context.create(type_hint="tab") + await on_entry_added + + # Make sure we didn't receive duplicates + assert len(events) == 3 + + remove_listener_contextCreated() + remove_listener_domContentLoaded() + remove_listener_load() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/invalid.py new file mode 100644 index 0000000000..eed6cb1f5b --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/invalid.py @@ -0,0 +1,156 @@ +import asyncio + +import pytest +from webdriver.bidi.error import InvalidArgumentException, NoSuchFrameException + +from ... import create_console_api_message + + +@pytest.mark.asyncio +async def test_params_empty(bidi_session, send_blocking_command): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command("session.subscribe", {}) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", [None, True, "foo", 42, {}]) +async def test_params_events_invalid_type(bidi_session, send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command("session.subscribe", {"events": value}) + + +@pytest.mark.asyncio +async def test_params_events_empty(bidi_session): + response = await bidi_session.session.subscribe(events=[]) + assert response == {} + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", [None, True, 42, [], {}]) +async def test_params_events_value_invalid_type(send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command("session.subscribe", {"events": [value]}) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", ["", "foo", "foo.bar", "log.invalidEvent"]) +async def test_params_events_value_invalid_event_name(send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command("session.subscribe", {"events": [value]}) + + +@pytest.mark.asyncio +async def test_params_events_value_valid_and_invalid_event_names( + bidi_session, send_blocking_command, top_context +): + with pytest.raises(InvalidArgumentException): + await send_blocking_command( + "session.subscribe", {"events": ["log.entryAdded", "some.invalidEvent"]} + ) + + # Make sure that we didn't subscribe to log.entryAdded because of the error + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + await create_console_api_message(bidi_session, top_context, "text1") + + # Wait for some time before checking the events array + await asyncio.sleep(0.5) + assert len(events) == 0 + + remove_listener() + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", [True, "foo", 42, {}]) +async def test_params_contexts_invalid_type(bidi_session, send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.subscribe", + { + "events": [], + "contexts": value, + } + ) + + +@pytest.mark.asyncio +async def test_params_contexts_empty(bidi_session): + response = await bidi_session.session.subscribe(events=[], contexts=[]) + assert response == {} + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", [None, True, 42, [], {}]) +async def 
test_params_contexts_value_invalid_type(send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.subscribe", + { + "events": [], + "contexts": [value], + } + ) + + +@pytest.mark.asyncio +async def test_params_contexts_value_invalid_value(send_blocking_command): + with pytest.raises(NoSuchFrameException): + response = await send_blocking_command( + "session.subscribe", + { + "events": [], + "contexts": ["foo"], + } + ) + + +@pytest.mark.asyncio +async def test_params_contexts_valid_and_invalid_value( + bidi_session, send_blocking_command, top_context +): + with pytest.raises(NoSuchFrameException): + await send_blocking_command( + "session.subscribe", + {"events": ["log.entryAdded"], "contexts": [top_context["context"], "foo"]}, + ) + + # Make sure that we didn't subscribe to log.entryAdded because of error + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + await create_console_api_message(bidi_session, top_context, "text1") + + # Wait for some time before checking the events array + await asyncio.sleep(0.5) + assert len(events) == 0 + + remove_listener() + + +@pytest.mark.asyncio +async def test_subscribe_to_closed_tab(bidi_session, send_blocking_command): + new_tab = await bidi_session.browsing_context.create(type_hint="tab") + await bidi_session.browsing_context.close(context=new_tab["context"]) + + # Try to subscribe to the closed context + with pytest.raises(NoSuchFrameException): + await send_blocking_command( + "session.subscribe", + { + "events": ["log.entryAdded"], + "contexts": [new_tab["context"]] + }, + ) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/contexts.py b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/contexts.py new file mode 100644 index 0000000000..99584987ef --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/contexts.py @@ -0,0 +1,167 @@ +import asyncio + +import pytest + +from ... import create_console_api_message, recursive_compare + +# The basic use case of unsubscribing from all contexts for a single event +# is covered by tests for each event in the dedicated folders. 
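# Illustrative sketch (assumption, not part of the upstream test file): the
# shape of the checks below — subscribe and then unsubscribe for a single
# context, trigger a console call in it via create_console_api_message, and
# verify that no log.entryAdded event is delivered anymore. The full tests
# additionally wait on a still-subscribed context before asserting.
async def assert_context_unsubscribed(bidi_session, context):
    await bidi_session.session.subscribe(
        events=["log.entryAdded"], contexts=[context["context"]]
    )
    await bidi_session.session.unsubscribe(
        events=["log.entryAdded"], contexts=[context["context"]]
    )

    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
    await create_console_api_message(bidi_session, context, "probe")

    # No entry is expected once the per-context subscription has been removed.
    assert len(events) == 0
    remove_listener()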
+ + +@pytest.mark.asyncio +async def test_unsubscribe_from_one_context( + bidi_session, top_context, new_tab, wait_for_event +): + # Subscribe for log events to multiple contexts + await bidi_session.session.subscribe( + events=["log.entryAdded"], contexts=[top_context["context"], new_tab["context"]] + ) + # Unsubscribe from log events in one of the contexts + await bidi_session.session.unsubscribe( + events=["log.entryAdded"], contexts=[top_context["context"]] + ) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger console event in the unsubscribed context + await create_console_api_message(bidi_session, top_context, "text1") + assert len(events) == 0 + + # Trigger another console event in the still observed context + on_entry_added = wait_for_event("log.entryAdded") + expected_text = await create_console_api_message(bidi_session, new_tab, "text2") + await on_entry_added + + assert len(events) == 1 + recursive_compare( + { + "text": expected_text, + }, + events[0], + ) + + remove_listener() + await bidi_session.session.unsubscribe( + events=["log.entryAdded"], contexts=[new_tab["context"]] + ) + + +@pytest.mark.asyncio +async def test_unsubscribe_from_top_context_with_iframes( + bidi_session, + top_context, + test_page_same_origin_frame, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_same_origin_frame, wait="complete" + ) + + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + + assert len(contexts[0]["children"]) == 1 + frame = contexts[0]["children"][0] + + # Subscribe and unsubscribe to the top context + await bidi_session.session.subscribe( + events=["log.entryAdded"], contexts=[top_context["context"]] + ) + await bidi_session.session.unsubscribe( + events=["log.entryAdded"], contexts=[top_context["context"]] + ) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger the event in the frame + await create_console_api_message(bidi_session, frame, "text1") + + assert len(events) == 0 + + remove_listener() + + +@pytest.mark.asyncio +async def test_unsubscribe_from_child_context( + bidi_session, + top_context, + test_page_same_origin_frame, +): + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_page_same_origin_frame, wait="complete" + ) + + contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"]) + + assert len(contexts[0]["children"]) == 1 + frame = contexts[0]["children"][0] + + # Subscribe to top context + await bidi_session.session.subscribe( + events=["log.entryAdded"], contexts=[top_context["context"]] + ) + # Unsubscribe from the frame context + await bidi_session.session.unsubscribe( + events=["log.entryAdded"], contexts=[frame["context"]] + ) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger the event in the frame + await create_console_api_message(bidi_session, frame, "text1") + # Trigger the event in the top context + await create_console_api_message(bidi_session, top_context, 
"text2") + + # Make sure we didn't receive any of the triggered events + assert len(events) == 0 + + remove_listener() + + +@pytest.mark.asyncio +async def test_unsubscribe_from_one_context_after_navigation( + bidi_session, top_context, test_alt_origin +): + await bidi_session.session.subscribe( + events=["log.entryAdded"], contexts=[top_context["context"]] + ) + + await bidi_session.browsing_context.navigate( + context=top_context["context"], url=test_alt_origin, wait="complete" + ) + + await bidi_session.session.unsubscribe( + events=["log.entryAdded"], contexts=[top_context["context"]] + ) + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + # Trigger the event + await create_console_api_message(bidi_session, top_context, "text1") + + # Make sure we successfully unsubscribed + assert len(events) == 0 + + remove_listener() diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/events.py b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/events.py new file mode 100644 index 0000000000..6cf2a896d2 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/events.py @@ -0,0 +1,83 @@ +import asyncio + +import pytest +from tests.support.sync import AsyncPoll +from webdriver.error import TimeoutException + +# The basic use case of unsubscribing globally from a single event +# is covered by tests for each event in the dedicated folders. + + +@pytest.mark.asyncio +async def test_unsubscribe_from_module(bidi_session): + await bidi_session.session.subscribe(events=["browsingContext"]) + await bidi_session.session.unsubscribe(events=["browsingContext"]) + + # Track all received browsing context events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener_contextCreated = bidi_session.add_event_listener( + "browsingContext.contextCreated", on_event + ) + remove_listener_domContentLoaded = bidi_session.add_event_listener( + "browsingContext.domContentLoaded", on_event + ) + remove_listener_load = bidi_session.add_event_listener( + "browsingContext.load", on_event + ) + + await bidi_session.browsing_context.create(type_hint="tab") + + wait = AsyncPoll(bidi_session, timeout=0.5) + with pytest.raises(TimeoutException): + await wait.until(lambda _: len(events) > 0) + + remove_listener_contextCreated() + remove_listener_domContentLoaded() + remove_listener_load() + + +@pytest.mark.asyncio +async def test_subscribe_to_module_unsubscribe_from_one_event( + bidi_session, wait_for_event +): + await bidi_session.session.subscribe(events=["browsingContext"]) + + # Unsubscribe from one event + await bidi_session.session.unsubscribe(events=["browsingContext.domContentLoaded"]) + + # Track all received browsing context events in the events array + events = [] + + async def on_event(method, data): + events.append(method) + + remove_listener_contextCreated = bidi_session.add_event_listener( + "browsingContext.contextCreated", on_event + ) + remove_listener_domContentLoaded = bidi_session.add_event_listener( + "browsingContext.domContentLoaded", on_event + ) + remove_listener_load = bidi_session.add_event_listener( + "browsingContext.load", on_event + ) + + # Wait for the last event + on_entry_added = wait_for_event("browsingContext.load") + await bidi_session.browsing_context.create(type_hint="tab") + await 
on_entry_added + + # Make sure we didn't receive browsingContext.domContentLoaded event + assert len(events) == 2 + assert "browsingContext.domContentLoaded" not in events + + remove_listener_contextCreated() + remove_listener_domContentLoaded() + remove_listener_load() + + # Unsubscribe from the rest of the events + await bidi_session.session.unsubscribe(events=["browsingContext.contextCreated"]) + await bidi_session.session.unsubscribe(events=["browsingContext.load"]) diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/invalid.py new file mode 100644 index 0000000000..a4fa34e8a1 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/invalid.py @@ -0,0 +1,234 @@ +import pytest +from webdriver.bidi.error import InvalidArgumentException, NoSuchFrameException + +from ... import create_console_api_message + + +@pytest.mark.asyncio +async def test_params_empty(bidi_session, send_blocking_command): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command("session.unsubscribe", {}) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", [None, True, "foo", 42, {}]) +async def test_params_events_invalid_type(bidi_session, send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command("session.unsubscribe", {"events": value}) + + +@pytest.mark.asyncio +async def test_params_events_empty(bidi_session): + response = await bidi_session.session.unsubscribe(events=[]) + assert response == {} + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", [None, True, 42, [], {}]) +async def test_params_events_value_invalid_type(send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command("session.unsubscribe", {"events": [value]}) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", ["", "foo", "foo.bar"]) +async def test_params_events_value_invalid_event_name(send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command("session.unsubscribe", {"events": [value]}) + + +@pytest.mark.asyncio +async def test_params_events_value_valid_and_invalid_event_name( + bidi_session, subscribe_events, send_blocking_command, wait_for_event, top_context +): + # Subscribe to a valid event + await subscribe_events(events=["log.entryAdded"]) + + # Try to unsubscribe from the valid and an invalid event + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.unsubscribe", {"events": ["log.entryAdded", "some.invalidEvent"]} + ) + + # Make sure that we didn't unsubscribe from log.entryAdded because of the error + # and events are still coming + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message(bidi_session, top_context, "text1") + await on_entry_added + + assert len(events) == 1 + + remove_listener() + + +@pytest.mark.asyncio +async def test_unsubscribe_from_one_event_and_then_from_module( + bidi_session, subscribe_events, send_blocking_command +): + await subscribe_events(events=["browsingContext"]) + + # Unsubscribe from one event + await 
bidi_session.session.unsubscribe(events=["browsingContext.domContentLoaded"]) + + # Try to unsubscribe from all events + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.unsubscribe", {"events": ["browsingContext"]} + ) + + # Unsubscribe from the rest of the events + await bidi_session.session.unsubscribe(events=["browsingContext.contextCreated"]) + await bidi_session.session.unsubscribe(events=["browsingContext.load"]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", [True, "foo", 42, {}]) +async def test_params_contexts_invalid_type(bidi_session, send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.unsubscribe", + { + "events": [], + "contexts": value, + } + ) + + +@pytest.mark.asyncio +async def test_params_contexts_empty(bidi_session): + response = await bidi_session.session.unsubscribe(events=[], contexts=[]) + assert response == {} + + +@pytest.mark.asyncio +@pytest.mark.parametrize("value", [None, True, 42, [], {}]) +async def test_params_contexts_value_invalid_type(send_blocking_command, value): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.unsubscribe", + { + "events": [], + "contexts": [value], + } + ) + + +@pytest.mark.asyncio +async def test_params_contexts_value_invalid_value(send_blocking_command): + with pytest.raises(NoSuchFrameException): + response = await send_blocking_command( + "session.unsubscribe", + { + "events": [], + "contexts": ["foo"], + }, + ) + + +@pytest.mark.asyncio +async def test_params_contexts_value_valid_and_invalid_value( + bidi_session, subscribe_events, send_blocking_command, wait_for_event, top_context +): + # Subscribe to a valid context + await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]]) + + # Try to unsubscribe from the valid and an invalid context + with pytest.raises(NoSuchFrameException): + response = await send_blocking_command( + "session.unsubscribe", + {"events": ["log.entryAdded"], "contexts": [top_context["context"], "foo"]}, + ) + + # Make sure that we didn't unsubscribe from the valid context because of the error + # and events are still coming + + # Track all received log.entryAdded events in the events array + events = [] + + async def on_event(method, data): + events.append(data) + + remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event) + + on_entry_added = wait_for_event("log.entryAdded") + await create_console_api_message(bidi_session, top_context, "text1") + await on_entry_added + + assert len(events) == 1 + + remove_listener() + + +@pytest.mark.asyncio +async def test_unsubscribe_from_closed_tab( + bidi_session, subscribe_events, send_blocking_command +): + new_tab = await bidi_session.browsing_context.create(type_hint="tab") + # Subscribe to a new context + await subscribe_events(events=["log.entryAdded"], contexts=[new_tab["context"]]) + + await bidi_session.browsing_context.close(context=new_tab["context"]) + + # Try to unsubscribe from the closed context + with pytest.raises(NoSuchFrameException): + response = await send_blocking_command( + "session.unsubscribe", + {"events": ["log.entryAdded"], "contexts": [new_tab["context"]]}, + ) + + +@pytest.mark.asyncio +async def test_params_unsubscribe_globally_without_subscription(send_blocking_command): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.unsubscribe", {"events": 
["log.entryAdded"]} + ) + + +@pytest.mark.asyncio +async def test_params_unsubscribe_globally_with_individual_subscription( + subscribe_events, send_blocking_command, top_context +): + # Subscribe to one context + await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]]) + + # Try to unsubscribe globally + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.unsubscribe", {"events": ["log.entryAdded"]} + ) + + +@pytest.mark.asyncio +async def test_params_unsubscribe_from_one_context_without_subscription( + send_blocking_command, top_context +): + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.unsubscribe", + {"events": ["log.entryAdded"], "contexts": [top_context["context"]]}, + ) + + +@pytest.mark.asyncio +async def test_params_unsubscribe_from_one_context_with_global_subscription( + subscribe_events, send_blocking_command, top_context +): + # Subscribe to all contexts + await subscribe_events(events=["log.entryAdded"]) + + # Try to unsubscribe from one context + with pytest.raises(InvalidArgumentException): + response = await send_blocking_command( + "session.unsubscribe", + {"events": ["log.entryAdded"], "contexts": [top_context["context"]]}, + ) diff --git a/testing/web-platform/tests/webdriver/tests/classic/accept_alert/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/accept_alert/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/accept_alert/accept.py b/testing/web-platform/tests/webdriver/tests/classic/accept_alert/accept.py new file mode 100644 index 0000000000..b83477e5ca --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/accept_alert/accept.py @@ -0,0 +1,110 @@ +import pytest + +from webdriver.error import NoSuchAlertException + +from tests.support.asserts import assert_error, assert_success +from tests.support.helpers import wait_for_new_handle +from tests.support.sync import Poll + + +def accept_alert(session): + return session.transport.send( + "POST", "session/{session_id}/alert/accept".format(**vars(session))) + + +def test_null_response_value(session, inline): + session.url = inline("") + + response = accept_alert(session) + value = assert_success(response) + assert value is None + + +def test_no_top_level_browsing_context(session, closed_window): + response = accept_alert(session) + assert_error(response, "no such window") + + +def test_no_browsing_context(session, closed_frame): + response = accept_alert(session) + assert_error(response, "no such alert") + + +def test_no_user_prompt(session): + response = accept_alert(session) + assert_error(response, "no such alert") + + +def test_accept_alert(session, inline): + session.url = inline("") + + response = accept_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text + + +def test_accept_confirm(session, inline): + session.url = inline("") + + response = accept_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text + + assert session.execute_script("return window.result") is True + + +def test_accept_prompt(session, inline): + session.url = inline(""" + + """) + + response = accept_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text + + assert session.execute_script("return window.result") == "Federer" + + +def 
test_unexpected_alert(session): + session.execute_script("window.setTimeout(function() { window.alert('Hello'); }, 100);") + wait = Poll( + session, + timeout=5, + ignored_exceptions=NoSuchAlertException, + message="No user prompt with text 'Hello' detected") + wait.until(lambda s: s.alert.text == "Hello") + + response = accept_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text + + +def test_accept_in_popup_window(session, inline): + orig_handles = session.handles + + session.url = inline(""" + + """) + button = session.find.css("button", all=False) + button.click() + + session.window_handle = wait_for_new_handle(session, orig_handles) + session.url = inline(""" + + """) + + response = accept_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text diff --git a/testing/web-platform/tests/webdriver/tests/classic/add_cookie/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/add_cookie/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/add_cookie/add.py b/testing/web-platform/tests/webdriver/tests/classic/add_cookie/add.py new file mode 100644 index 0000000000..3a19432fc6 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/add_cookie/add.py @@ -0,0 +1,286 @@ +import pytest + +from datetime import datetime, timedelta + +from webdriver.transport import Response + +from tests.support.asserts import assert_error, assert_success +from tests.support.helpers import clear_all_cookies + + +def add_cookie(session, cookie): + return session.transport.send( + "POST", "session/{session_id}/cookie".format(**vars(session)), + {"cookie": cookie}) + + +def test_null_parameter_value(session, http): + path = "/session/{session_id}/cookie".format(**vars(session)) + with http.post(path, None) as response: + assert_error(Response.from_http(response), "invalid argument") + + +def test_null_response_value(session, url): + new_cookie = { + "name": "hello", + "value": "world", + } + + session.url = url("/common/blank.html") + clear_all_cookies(session) + + response = add_cookie(session, new_cookie) + value = assert_success(response) + assert value is None + + +def test_no_top_browsing_context(session, closed_window): + new_cookie = { + "name": "hello", + "value": "world", + } + + response = add_cookie(session, new_cookie) + assert_error(response, "no such window") + + +def test_no_browsing_context(session, closed_frame): + new_cookie = { + "name": "hello", + "value": "world", + } + + response = add_cookie(session, new_cookie) + assert_error(response, "no such window") + + +@pytest.mark.parametrize( + "page", + [ + "about:blank", + "blob:foo/bar", + "data:text/html;charset=utf-8,
foo
", + "file:///foo/bar", + "ftp://example.org", + "javascript:foo", + "ws://example.org", + "wss://example.org", + ], + ids=[ + "about", + "blob", + "data", + "file", + "ftp", + "javascript", + "websocket", + "secure websocket", + ], +) +def test_cookie_unsupported_scheme(session, page): + new_cookie = { + "name": "hello", + "value": "world", + "domain": page, + "path": "/", + "httpOnly": False, + "secure": False + } + + result = add_cookie(session, new_cookie) + assert_error(result, "invalid cookie domain") + + +def test_add_domain_cookie(session, url, server_config): + new_cookie = { + "name": "hello", + "value": "world", + "domain": server_config["browser_host"], + "path": "/", + "httpOnly": False, + "secure": False + } + + session.url = url("/common/blank.html") + clear_all_cookies(session) + + result = add_cookie(session, new_cookie) + assert_success(result) + + cookie = session.cookies("hello") + assert "domain" in cookie + assert isinstance(cookie["domain"], str) + assert "name" in cookie + assert isinstance(cookie["name"], str) + assert "value" in cookie + assert isinstance(cookie["value"], str) + + assert cookie["name"] == "hello" + assert cookie["value"] == "world" + assert cookie["domain"] == server_config["browser_host"] or \ + cookie["domain"] == ".%s" % server_config["browser_host"] + + +def test_add_cookie_for_ip(session, url, server_config, configuration): + new_cookie = { + "name": "hello", + "value": "world", + "domain": "127.0.0.1", + "path": "/", + "httpOnly": False, + "secure": False + } + + session.url = "http://127.0.0.1:%s/common/blank.html" % (server_config["ports"]["http"][0]) + clear_all_cookies(session) + + result = add_cookie(session, new_cookie) + assert_success(result) + + cookie = session.cookies("hello") + assert "name" in cookie + assert isinstance(cookie["name"], str) + assert "value" in cookie + assert isinstance(cookie["value"], str) + assert "domain" in cookie + assert isinstance(cookie["domain"], str) + + assert cookie["name"] == "hello" + assert cookie["value"] == "world" + assert cookie["domain"] == "127.0.0.1" + + +def test_add_non_session_cookie(session, url): + a_day_from_now = int( + (datetime.utcnow() + timedelta(days=1) - datetime.utcfromtimestamp(0)).total_seconds()) + + new_cookie = { + "name": "hello", + "value": "world", + "expiry": a_day_from_now + } + + session.url = url("/common/blank.html") + clear_all_cookies(session) + + result = add_cookie(session, new_cookie) + assert_success(result) + + cookie = session.cookies("hello") + assert "name" in cookie + assert isinstance(cookie["name"], str) + assert "value" in cookie + assert isinstance(cookie["value"], str) + assert "expiry" in cookie + assert isinstance(cookie["expiry"], int) + + assert cookie["name"] == "hello" + assert cookie["value"] == "world" + assert cookie["expiry"] == a_day_from_now + + +def test_add_session_cookie(session, url): + new_cookie = { + "name": "hello", + "value": "world" + } + + session.url = url("/common/blank.html") + clear_all_cookies(session) + + result = add_cookie(session, new_cookie) + assert_success(result) + + cookie = session.cookies("hello") + assert "name" in cookie + assert isinstance(cookie["name"], str) + assert "value" in cookie + assert isinstance(cookie["value"], str) + if "expiry" in cookie: + assert cookie.get("expiry") is None + + assert cookie["name"] == "hello" + assert cookie["value"] == "world" + + +def test_add_session_cookie_with_leading_dot_character_in_domain(session, url, server_config): + new_cookie = { + "name": "hello", + "value": 
"world", + "domain": ".%s" % server_config["browser_host"] + } + + session.url = url("/common/blank.html") + clear_all_cookies(session) + + result = add_cookie(session, new_cookie) + assert_success(result) + + cookie = session.cookies("hello") + assert "name" in cookie + assert isinstance(cookie["name"], str) + assert "value" in cookie + assert isinstance(cookie["value"], str) + assert "domain" in cookie + assert isinstance(cookie["domain"], str) + + assert cookie["name"] == "hello" + assert cookie["value"] == "world" + assert cookie["domain"] == server_config["browser_host"] or \ + cookie["domain"] == ".%s" % server_config["browser_host"] + + +@pytest.mark.parametrize("same_site", ["None", "Lax", "Strict"]) +def test_add_cookie_with_valid_samesite_flag(session, url, same_site): + new_cookie = { + "name": "hello", + "value": "world", + "sameSite": same_site + } + + session.url = url("/common/blank.html") + clear_all_cookies(session) + + result = add_cookie(session, new_cookie) + assert_success(result) + + cookie = session.cookies("hello") + assert "name" in cookie + assert isinstance(cookie["name"], str) + assert "value" in cookie + assert isinstance(cookie["value"], str) + assert "sameSite" in cookie + assert isinstance(cookie["sameSite"], str) + + assert cookie["name"] == "hello" + assert cookie["value"] == "world" + assert cookie["sameSite"] == same_site + + +def test_add_cookie_with_invalid_samesite_flag(session, url): + new_cookie = { + "name": "hello", + "value": "world", + "sameSite": "invalid" + } + + session.url = url("/common/blank.html") + clear_all_cookies(session) + + response = add_cookie(session, new_cookie) + assert_error(response, "invalid argument") + + +@pytest.mark.parametrize("same_site", [False, 12, dict()]) +def test_add_cookie_with_invalid_samesite_type(session, url, same_site): + new_cookie = { + "name": "hello", + "value": "world", + "sameSite": same_site + } + + session.url = url("/common/blank.html") + clear_all_cookies(session) + + response = add_cookie(session, new_cookie) + assert_error(response, "invalid argument") diff --git a/testing/web-platform/tests/webdriver/tests/classic/add_cookie/user_prompts.py b/testing/web-platform/tests/webdriver/tests/classic/add_cookie/user_prompts.py new file mode 100644 index 0000000000..f58aacd02a --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/add_cookie/user_prompts.py @@ -0,0 +1,137 @@ +# META: timeout=long + +import pytest + +from webdriver.error import NoSuchCookieException + +from tests.support.asserts import assert_dialog_handled, assert_error, assert_success + + +def add_cookie(session, cookie): + return session.transport.send( + "POST", "session/{session_id}/cookie".format(**vars(session)), + {"cookie": cookie}) + + +@pytest.fixture +def check_user_prompt_closed_without_exception(session, url, create_dialog): + def check_user_prompt_closed_without_exception(dialog_type, retval): + new_cookie = { + "name": "foo", + "value": "bar", + } + + session.url = url("/common/blank.html") + + create_dialog(dialog_type, text=dialog_type) + + response = add_cookie(session, new_cookie) + assert_success(response) + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert session.cookies("foo") + + return check_user_prompt_closed_without_exception + + +@pytest.fixture +def check_user_prompt_closed_with_exception(session, url, create_dialog): + def check_user_prompt_closed_with_exception(dialog_type, retval): + new_cookie = { + "name": "foo", + "value": "bar", + } + + 
session.url = url("/common/blank.html") + + create_dialog(dialog_type, text=dialog_type) + + response = add_cookie(session, new_cookie) + assert_error(response, "unexpected alert open") + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + with pytest.raises(NoSuchCookieException): + assert session.cookies("foo") + + return check_user_prompt_closed_with_exception + + +@pytest.fixture +def check_user_prompt_not_closed_but_exception(session, url, create_dialog): + def check_user_prompt_not_closed_but_exception(dialog_type): + new_cookie = { + "name": "foo", + "value": "bar", + } + + session.url = url("/common/blank.html") + + create_dialog(dialog_type, text=dialog_type) + + response = add_cookie(session, new_cookie) + assert_error(response, "unexpected alert open") + + assert session.alert.text == dialog_type + session.alert.dismiss() + + with pytest.raises(NoSuchCookieException): + assert session.cookies("foo") + + return check_user_prompt_not_closed_but_exception + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", True), + ("prompt", ""), +]) +def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval): + check_user_prompt_closed_without_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", True), + ("prompt", ""), +]) +def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval): + check_user_prompt_closed_without_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type): + check_user_prompt_not_closed_but_exception(dialog_type) + + +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_default(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) diff --git a/testing/web-platform/tests/webdriver/tests/classic/back/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/back/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/back/back.py b/testing/web-platform/tests/webdriver/tests/classic/back/back.py new file mode 100644 index 0000000000..21e8498ccd --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/back/back.py @@ -0,0 +1,169 @@ +import pytest +from webdriver import error + +from tests.support.asserts import assert_error, assert_success + + +def back(session): 
+    return session.transport.send(
+        "POST", "session/{session_id}/back".format(**vars(session)))
+
+
+def test_null_response_value(session, inline):
+    session.url = inline("
") + session.url = inline("

") + + response = back(session) + value = assert_success(response) + assert value is None + + +def test_no_top_browsing_context(session, closed_window): + response = back(session) + assert_error(response, "no such window") + + +def test_no_browsing_context(session, closed_frame): + response = back(session) + assert_success(response) + + +def test_no_browsing_history(session): + response = back(session) + assert_success(response) + + +def test_basic(session, inline): + url = inline("

") + + session.url = url + session.url = inline("
") + element = session.find.css("#bar", all=False) + + response = back(session) + assert_success(response) + + with pytest.raises(error.StaleElementReferenceException): + element.property("id") + + assert session.url == url + assert session.find.css("#foo", all=False) + + +def test_data_urls(session, inline): + test_pages = [ + inline("

"), + inline("

"), + ] + + for page in test_pages: + session.url = page + assert session.url == test_pages[1] + + response = back(session) + assert_success(response) + assert session.url == test_pages[0] + + +def test_dismissed_beforeunload(session, inline): + url_beforeunload = inline(""" + + + """) + + session.url = inline("

") + session.url = url_beforeunload + + element = session.find.css("input", all=False) + element.send_keys("bar") + + response = back(session) + assert_success(response) + + assert session.url != url_beforeunload + + +def test_fragments(session, url): + test_pages = [ + url("/common/blank.html"), + url("/common/blank.html#1234"), + url("/common/blank.html#5678"), + ] + + for page in test_pages: + session.url = page + assert session.url == test_pages[2] + + response = back(session) + assert_success(response) + assert session.url == test_pages[1] + + response = back(session) + assert_success(response) + assert session.url == test_pages[0] + + +def test_history_pushstate(session, inline): + pushstate_page = inline(""" + + click + """) + + session.url = pushstate_page + session.find.css("a", all=False).click() + + assert session.url == "{}#pushstate".format(pushstate_page) + assert session.execute_script("return history.state;") == {"foo": "bar"} + + response = back(session) + assert_success(response) + + assert session.url == pushstate_page + assert session.execute_script("return history.state;") is None + + +def test_removed_iframe(session, url, inline): + page = inline("

foo") + + session.url = page + session.url = url("/webdriver/tests/support/html/frames_no_bfcache.html") + + subframe = session.find.css("#sub-frame", all=False) + session.switch_frame(subframe) + + response = back(session) + assert_success(response) + + assert session.url == page + + +# Capability needed as long as no valid certificate is available: +# https://github.com/web-platform-tests/wpt/issues/28847 +@pytest.mark.capabilities({"acceptInsecureCerts": True}) +def test_cross_origin(session, url): + base_path = ("/webdriver/tests/support/html/subframe.html" + + "?pipe=header(Cross-Origin-Opener-Policy,same-origin") + first_page = url(base_path, protocol="https") + second_page = url(base_path, protocol="https", domain="alt") + + session.url = first_page + session.url = second_page + + elem = session.find.css("#delete", all=False) + + response = back(session) + assert_success(response) + + assert session.url == first_page + + with pytest.raises(error.StaleElementReferenceException): + elem.click() + elem = session.find.css("#delete", all=False) diff --git a/testing/web-platform/tests/webdriver/tests/classic/back/conftest.py b/testing/web-platform/tests/webdriver/tests/classic/back/conftest.py new file mode 100644 index 0000000000..bd5db0cfeb --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/back/conftest.py @@ -0,0 +1,19 @@ +import pytest + +from webdriver.error import NoSuchWindowException + + +@pytest.fixture(name="session") +def fixture_session(capabilities, session): + """Prevent re-using existent history by running the test in a new window.""" + original_handle = session.window_handle + session.window_handle = session.new_window() + + yield session + + try: + session.window.close() + except NoSuchWindowException: + pass + + session.window_handle = original_handle diff --git a/testing/web-platform/tests/webdriver/tests/classic/back/user_prompts.py b/testing/web-platform/tests/webdriver/tests/classic/back/user_prompts.py new file mode 100644 index 0000000000..9d04f0f4ab --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/back/user_prompts.py @@ -0,0 +1,118 @@ +# META: timeout=long + +import pytest + +from tests.support.asserts import assert_dialog_handled, assert_error, assert_success + + +def back(session): + return session.transport.send( + "POST", "session/{session_id}/back".format(**vars(session))) + + +@pytest.fixture +def pages(session, inline): + pages = [ + inline("

"), + inline("

"), + ] + + for page in pages: + session.url = page + + return pages + + +@pytest.fixture +def check_user_prompt_closed_without_exception(session, create_dialog, pages): + def check_user_prompt_closed_without_exception(dialog_type, retval): + create_dialog(dialog_type, text=dialog_type) + + response = back(session) + assert_success(response) + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert session.url == pages[0] + + return check_user_prompt_closed_without_exception + + +@pytest.fixture +def check_user_prompt_closed_with_exception(session, create_dialog, pages): + def check_user_prompt_closed_with_exception(dialog_type, retval): + create_dialog(dialog_type, text=dialog_type) + + response = back(session) + assert_error(response, "unexpected alert open") + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert session.url == pages[1] + + return check_user_prompt_closed_with_exception + + +@pytest.fixture +def check_user_prompt_not_closed_but_exception(session, create_dialog, pages): + def check_user_prompt_not_closed_but_exception(dialog_type): + create_dialog(dialog_type, text=dialog_type) + + response = back(session) + assert_error(response, "unexpected alert open") + + assert session.alert.text == dialog_type + session.alert.dismiss() + + assert session.url == pages[1] + + return check_user_prompt_not_closed_but_exception + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_accept(check_user_prompt_closed_without_exception, dialog_type): + # retval not testable for confirm and prompt because window is gone + check_user_prompt_closed_without_exception(dialog_type, None) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", True), + ("prompt", ""), +]) +def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_dismiss(check_user_prompt_closed_without_exception, dialog_type): + # retval not testable for confirm and prompt because window is gone + check_user_prompt_closed_without_exception(dialog_type, None) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type): + check_user_prompt_not_closed_but_exception(dialog_type) + + +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_default(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) diff --git a/testing/web-platform/tests/webdriver/tests/classic/close_window/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/close_window/__init__.py new 
file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/close_window/close.py b/testing/web-platform/tests/webdriver/tests/classic/close_window/close.py new file mode 100644 index 0000000000..7b382fa9bb --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/close_window/close.py @@ -0,0 +1,102 @@ +import pytest +from webdriver import error + +from tests.support.asserts import assert_error, assert_success + + +def close(session): + return session.transport.send( + "DELETE", "session/{session_id}/window".format(**vars(session))) + + +def test_no_top_browsing_context(session, closed_window): + response = close(session) + assert_error(response, "no such window") + + +def test_no_browsing_context(session, url): + new_handle = session.new_window() + + session.url = url("/webdriver/tests/support/html/frames.html") + + subframe = session.find.css("#sub-frame", all=False) + session.switch_frame(subframe) + + frame = session.find.css("#delete-frame", all=False) + session.switch_frame(frame) + + button = session.find.css("#remove-parent", all=False) + button.click() + + response = close(session) + handles = assert_success(response) + assert handles == [new_handle] + + +def test_close_browsing_context(session): + original_handles = session.handles + + new_handle = session.new_window() + session.window_handle = new_handle + + response = close(session) + handles = assert_success(response, original_handles) + assert session.handles == original_handles + assert new_handle not in handles + + +def test_close_browsing_context_with_dismissed_beforeunload_prompt(session, inline): + original_handles = session.handles + + new_handle = session.new_window() + session.window_handle = new_handle + + session.url = inline(""" + + + """) + + session.find.css("input", all=False).send_keys("foo") + + response = close(session) + handles = assert_success(response, original_handles) + assert session.handles == original_handles + assert new_handle not in handles + + # A beforeunload prompt has to be automatically dismissed + with pytest.raises(error.NoSuchWindowException): + session.alert.text + + +def test_close_last_browsing_context(session): + assert len(session.handles) == 1 + response = close(session) + + assert_success(response, []) + + # With no more open top-level browsing contexts, the session is closed. + session.session_id = None + + +def test_element_usage_after_closing_browsing_context(session, inline): + session.url = inline("

foo") + a = session.find.css("p", all=False) + first = session.window_handle + + second = session.new_window(type_hint="tab") + session.window_handle = second + + session.url = inline("

other") + b = session.find.css("p", all=False) + + session.window_handle = first + response = close(session) + assert_success(response) + assert len(session.handles) == 1 + + session.window_handle = second + assert b.attribute("id") == "b" diff --git a/testing/web-platform/tests/webdriver/tests/classic/close_window/user_prompts.py b/testing/web-platform/tests/webdriver/tests/classic/close_window/user_prompts.py new file mode 100644 index 0000000000..c0f9cc7610 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/close_window/user_prompts.py @@ -0,0 +1,119 @@ +# META: timeout=long + +import pytest + +from tests.support.asserts import assert_dialog_handled, assert_error, assert_success + + +def close(session): + return session.transport.send( + "DELETE", "session/{session_id}/window".format(**vars(session))) + + +@pytest.fixture +def check_user_prompt_closed_without_exception(session, create_dialog): + def check_user_prompt_closed_without_exception(dialog_type, retval): + original_handle = session.window_handle + new_handle = session.new_window() + session.window_handle = new_handle + + create_dialog(dialog_type, text=dialog_type) + + response = close(session) + assert_success(response) + + # Asserting that the dialog was handled requires valid top-level browsing + # context, so we must switch to the original window. + session.window_handle = original_handle + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert new_handle not in session.handles + + return check_user_prompt_closed_without_exception + + +@pytest.fixture +def check_user_prompt_closed_with_exception(session, create_dialog): + def check_user_prompt_closed_with_exception(dialog_type, retval): + new_handle = session.new_window() + session.window_handle = new_handle + + create_dialog(dialog_type, text=dialog_type) + + response = close(session) + assert_error(response, "unexpected alert open") + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert new_handle in session.handles + + return check_user_prompt_closed_with_exception + + +@pytest.fixture +def check_user_prompt_not_closed_but_exception(session, create_dialog): + def check_user_prompt_not_closed_but_exception(dialog_type): + new_handle = session.new_window() + session.window_handle = new_handle + + create_dialog(dialog_type, text=dialog_type) + + response = close(session) + assert_error(response, "unexpected alert open") + + assert session.alert.text == dialog_type + session.alert.dismiss() + + assert new_handle in session.handles + + return check_user_prompt_not_closed_but_exception + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_accept(check_user_prompt_closed_without_exception, dialog_type): + # retval not testable for confirm and prompt because window is gone + check_user_prompt_closed_without_exception(dialog_type, None) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", True), + ("prompt", ""), +]) +def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_dismiss(check_user_prompt_closed_without_exception, 
dialog_type): + # retval not testable for confirm and prompt because window is gone + check_user_prompt_closed_without_exception(dialog_type, None) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type): + check_user_prompt_not_closed_but_exception(dialog_type) + + +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_default(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) diff --git a/testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/delete.py b/testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/delete.py new file mode 100644 index 0000000000..86d66561b0 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/delete.py @@ -0,0 +1,22 @@ +from tests.support.asserts import assert_error, assert_success + + +def delete_all_cookies(session): + return session.transport.send( + "DELETE", "/session/{session_id}/cookie".format(**vars(session))) + + +def test_null_response_value(session, url): + response = delete_all_cookies(session) + value = assert_success(response) + assert value is None + + +def test_no_top_browsing_context(session, closed_window): + response = delete_all_cookies(session) + assert_error(response, "no such window") + + +def test_no_browsing_context(session, closed_frame): + response = delete_all_cookies(session) + assert_error(response, "no such window") diff --git a/testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/user_prompts.py b/testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/user_prompts.py new file mode 100644 index 0000000000..dca4f3c8bf --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/delete_all_cookies/user_prompts.py @@ -0,0 +1,119 @@ +# META: timeout=long + +import pytest + +from webdriver.error import NoSuchCookieException + +from tests.support.asserts import assert_dialog_handled, assert_error, assert_success + + +def delete_all_cookies(session): + return session.transport.send( + "DELETE", "/session/{session_id}/cookie".format(**vars(session))) + + +@pytest.fixture +def check_user_prompt_closed_without_exception(session, create_dialog, create_cookie): + def check_user_prompt_closed_without_exception(dialog_type, retval): + create_cookie("foo", value="bar", path="/common/blank.html") + + create_dialog(dialog_type, text=dialog_type) + + response = delete_all_cookies(session) + assert_success(response) + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert session.cookies() == [] + + return check_user_prompt_closed_without_exception + + +@pytest.fixture +def check_user_prompt_closed_with_exception(session, 
create_dialog, create_cookie): + def check_user_prompt_closed_with_exception(dialog_type, retval): + create_cookie("foo", value="bar", path="/common/blank.html") + + create_dialog(dialog_type, text=dialog_type) + + response = delete_all_cookies(session) + assert_error(response, "unexpected alert open") + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert session.cookies() != [] + + return check_user_prompt_closed_with_exception + + +@pytest.fixture +def check_user_prompt_not_closed_but_exception(session, create_dialog, create_cookie): + def check_user_prompt_not_closed_but_exception(dialog_type): + create_cookie("foo", value="bar", path="/common/blank.html") + + create_dialog(dialog_type, text=dialog_type) + + response = delete_all_cookies(session) + assert_error(response, "unexpected alert open") + + assert session.alert.text == dialog_type + session.alert.dismiss() + + assert session.cookies() != [] + + return check_user_prompt_not_closed_but_exception + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", True), + ("prompt", ""), +]) +def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval): + check_user_prompt_closed_without_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", True), + ("prompt", ""), +]) +def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval): + check_user_prompt_closed_without_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type): + check_user_prompt_not_closed_but_exception(dialog_type) + + +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_default(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) diff --git a/testing/web-platform/tests/webdriver/tests/classic/delete_cookie/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/delete_cookie/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/delete_cookie/delete.py b/testing/web-platform/tests/webdriver/tests/classic/delete_cookie/delete.py new file mode 100644 index 0000000000..4b37c0453b --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/delete_cookie/delete.py @@ -0,0 +1,29 @@ +from tests.support.asserts import assert_error, assert_success + + +def 
delete_cookie(session, name): + return session.transport.send( + "DELETE", "/session/{session_id}/cookie/{name}".format( + session_id=session.session_id, + name=name)) + + +def test_null_response_value(session, url): + response = delete_cookie(session, "foo") + value = assert_success(response) + assert value is None + + +def test_no_top_browsing_context(session, closed_window): + response = delete_cookie(session, "foo") + assert_error(response, "no such window") + + +def test_no_browsing_context(session, closed_frame): + response = delete_cookie(session, "foo") + assert_error(response, "no such window") + + +def test_unknown_cookie(session): + response = delete_cookie(session, "stilton") + assert_success(response) diff --git a/testing/web-platform/tests/webdriver/tests/classic/delete_cookie/user_prompts.py b/testing/web-platform/tests/webdriver/tests/classic/delete_cookie/user_prompts.py new file mode 100644 index 0000000000..1ed7db6e8e --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/delete_cookie/user_prompts.py @@ -0,0 +1,119 @@ +# META: timeout=long + +import pytest + +from webdriver.error import NoSuchCookieException + +from tests.support.asserts import assert_dialog_handled, assert_error, assert_success + + +def delete_cookie(session, name): + return session.transport.send("DELETE", "/session/%s/cookie/%s" % (session.session_id, name)) + + +@pytest.fixture +def check_user_prompt_closed_without_exception(session, create_dialog, create_cookie): + def check_user_prompt_closed_without_exception(dialog_type, retval): + create_cookie("foo", value="bar", path="/common/blank.html") + + create_dialog(dialog_type, text=dialog_type) + + response = delete_cookie(session, "foo") + assert_success(response) + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + with pytest.raises(NoSuchCookieException): + assert session.cookies("foo") + + return check_user_prompt_closed_without_exception + + +@pytest.fixture +def check_user_prompt_closed_with_exception(session, create_dialog, create_cookie): + def check_user_prompt_closed_with_exception(dialog_type, retval): + create_cookie("foo", value="bar", path="/common/blank.html") + + create_dialog(dialog_type, text=dialog_type) + + response = delete_cookie(session, "foo") + assert_error(response, "unexpected alert open") + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert session.cookies("foo") + + return check_user_prompt_closed_with_exception + + +@pytest.fixture +def check_user_prompt_not_closed_but_exception(session, create_dialog, create_cookie): + def check_user_prompt_not_closed_but_exception(dialog_type): + create_cookie("foo", value="bar", path="/common/blank.html") + + create_dialog(dialog_type, text=dialog_type) + + response = delete_cookie(session, "foo") + assert_error(response, "unexpected alert open") + + assert session.alert.text == dialog_type + session.alert.dismiss() + + assert session.cookies("foo") + + return check_user_prompt_not_closed_but_exception + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", True), + ("prompt", ""), +]) +def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval): + check_user_prompt_closed_without_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + 
("confirm", True), + ("prompt", ""), +]) +def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval): + check_user_prompt_closed_without_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type): + check_user_prompt_not_closed_but_exception(dialog_type) + + +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_default(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) diff --git a/testing/web-platform/tests/webdriver/tests/classic/delete_session/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/delete_session/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/delete_session/delete.py b/testing/web-platform/tests/webdriver/tests/classic/delete_session/delete.py new file mode 100644 index 0000000000..a3032cc134 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/delete_session/delete.py @@ -0,0 +1,42 @@ +import pytest +from webdriver import error + +from tests.support.asserts import assert_success + + +def delete_session(session): + return session.transport.send("DELETE", "session/{session_id}".format(**vars(session))) + + +def test_null_response_value(session): + response = delete_session(session) + value = assert_success(response) + assert value is None + + # Need an explicit call to session.end() to notify the test harness + # that a new session needs to be created for subsequent tests. + session.end() + + +def test_dismissed_beforeunload_prompt(session, inline): + session.url = inline(""" + + + """) + + session.find.css("input", all=False).send_keys("foo") + + response = delete_session(session) + assert_success(response) + + # A beforeunload prompt has to be automatically dismissed, and the session deleted + with pytest.raises(error.InvalidSessionIdException): + session.alert.text + + # Need an explicit call to session.end() to notify the test harness + # that a new session needs to be created for subsequent tests. 
+ session.end() diff --git a/testing/web-platform/tests/webdriver/tests/classic/dismiss_alert/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/dismiss_alert/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/dismiss_alert/dismiss.py b/testing/web-platform/tests/webdriver/tests/classic/dismiss_alert/dismiss.py new file mode 100644 index 0000000000..a28dec7687 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/dismiss_alert/dismiss.py @@ -0,0 +1,109 @@ +import pytest + +from webdriver.error import NoSuchAlertException + +from tests.support.asserts import assert_error, assert_success +from tests.support.helpers import wait_for_new_handle +from tests.support.sync import Poll + + +def dismiss_alert(session): + return session.transport.send( + "POST", "session/{session_id}/alert/dismiss".format(**vars(session))) + + +def test_null_response_value(session, inline): + session.url = inline("") + + response = dismiss_alert(session) + value = assert_success(response) + assert value is None + + +def test_no_top_browsing_context(session, closed_window): + response = dismiss_alert(session) + assert_error(response, "no such window") + + +def test_no_browsing_context(session, closed_frame): + response = dismiss_alert(session) + assert_error(response, "no such alert") + + +def test_no_user_prompt(session): + response = dismiss_alert(session) + assert_error(response, "no such alert") + + +def test_dismiss_alert(session, inline): + session.url = inline("") + + response = dismiss_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text + + +def test_dismiss_confirm(session, inline): + session.url = inline("") + + response = dismiss_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text + + assert session.execute_script("return window.result;") is False + + +def test_dismiss_prompt(session, inline): + session.url = inline(""" + + """) + + response = dismiss_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text + + assert session.execute_script("return window.result") is None + + +def test_unexpected_alert(session): + session.execute_script("setTimeout(function() { alert('Hello'); }, 100);") + + wait = Poll( + session, + timeout=5, + ignored_exceptions=NoSuchAlertException, + message="No user prompt with text 'Hello' detected") + wait.until(lambda s: s.alert.text == "Hello") + + response = dismiss_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text + + +def test_dismiss_in_popup_window(session, inline): + orig_handles = session.handles + + session.url = inline(""" + + """) + button = session.find.css("button", all=False) + button.click() + + session.window_handle = wait_for_new_handle(session, orig_handles) + session.url = inline(""" + + """) + + response = dismiss_alert(session) + assert_success(response) + + with pytest.raises(NoSuchAlertException): + session.alert.text diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_clear/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/element_clear/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_clear/clear.py b/testing/web-platform/tests/webdriver/tests/classic/element_clear/clear.py new file mode 100644 index 
0000000000..9b0d7f2133 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/element_clear/clear.py @@ -0,0 +1,454 @@ +# META: timeout=long + +import pytest +from webdriver import Element + +from tests.support.asserts import ( + assert_element_has_focus, + assert_error, + assert_events_equal, + assert_in_events, + assert_success, +) + + +@pytest.fixture +def tracked_events(): + return [ + "blur", + "change", + "focus", + ] + + +def element_clear(session, element): + return session.transport.send( + "POST", "/session/{session_id}/element/{element_id}/clear".format( + session_id=session.session_id, + element_id=element.id)) + + +@pytest.fixture(scope="session") +def text_file(tmpdir_factory): + fh = tmpdir_factory.mktemp("tmp").join("hello.txt") + fh.write("hello") + return fh + + +def test_null_response_value(session, inline): + session.url = inline("") + element = session.find.css("input", all=False) + + response = element_clear(session, element) + value = assert_success(response) + assert value is None + + +def test_no_top_browsing_context(session, closed_window): + element = Element(session, "foo") + response = element_clear(session, element) + assert_error(response, "no such window") + + original_handle, element = closed_window + response = element_clear(session, element) + assert_error(response, "no such window") + + session.window_handle = original_handle + response = element_clear(session, element) + assert_error(response, "no such element") + + +def test_no_browsing_context(session, closed_frame): + element = Element(session, "foo") + + response = element_clear(session, element) + assert_error(response, "no such window") + + +def test_no_such_element_with_invalid_value(session): + element = Element(session, "foo") + + response = element_clear(session, element) + assert_error(response, "no such element") + + +def test_no_such_element_with_shadow_root(session, get_test_page): + session.url = get_test_page() + + element = session.find.css("custom-element", all=False) + + result = element_clear(session, element.shadow_root) + assert_error(result, "no such element") + + +@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"]) +def test_no_such_element_from_other_window_handle(session, inline, closed): + session.url = inline("

") + element = session.find.css("#parent", all=False) + + new_handle = session.new_window() + + if closed: + session.window.close() + + session.window_handle = new_handle + + response = element_clear(session, element) + assert_error(response, "no such element") + + +@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"]) +def test_no_such_element_from_other_frame(session, get_test_page, closed): + session.url = get_test_page(as_frame=True) + + frame = session.find.css("iframe", all=False) + session.switch_frame(frame) + + element = session.find.css("div", all=False) + + session.switch_frame("parent") + + if closed: + session.execute_script("arguments[0].remove();", args=[frame]) + + response = element_clear(session, element) + assert_error(response, "no such element") + + +@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"]) +def test_stale_element_reference(session, stale_element, as_frame): + element = stale_element("input#text", as_frame=as_frame) + + response = element_clear(session, element) + assert_error(response, "stale element reference") + + +def test_pointer_interactable(session, inline): + session.url = inline("") + element = session.find.css("input", all=False) + + response = element_clear(session, element) + assert_error(response, "element not interactable") + + +def test_keyboard_interactable(session, inline): + session.url = inline(""" + +

+ + + """) + element = session.find.css("input", all=False) + assert element.property("value") == "foobar" + + response = element_clear(session, element) + assert_success(response) + assert element.property("value") == "" + + +@pytest.mark.parametrize("type,value,default", + [("number", "42", ""), + ("range", "42", "50"), + ("email", "foo@example.com", ""), + ("password", "password", ""), + ("search", "search", ""), + ("tel", "999", ""), + ("text", "text", ""), + ("url", "https://example.com/", ""), + ("color", "#ff0000", "#000000"), + ("date", "2017-12-26", ""), + ("datetime", "2017-12-26T19:48", ""), + ("datetime-local", "2017-12-26T19:48", ""), + ("time", "19:48", ""), + ("month", "2017-11", ""), + ("week", "2017-W52", "")]) +def test_input(session, inline, add_event_listeners, tracked_events, type, value, default): + session.url = inline("" % (type, value)) + element = session.find.css("input", all=False) + add_event_listeners(element, tracked_events) + assert element.property("value") == value + + response = element_clear(session, element) + assert_success(response) + assert element.property("value") == default + assert_in_events(session, ["focus", "change", "blur"]) + assert_element_has_focus(session.execute_script("return document.body")) + + +@pytest.mark.parametrize("type", + ["number", + "range", + "email", + "password", + "search", + "tel", + "text", + "url", + "color", + "date", + "datetime", + "datetime-local", + "time", + "month", + "week", + "file"]) +def test_input_disabled(session, inline, type): + session.url = inline("" % type) + element = session.find.css("input", all=False) + + response = element_clear(session, element) + assert_error(response, "invalid element state") + + +@pytest.mark.parametrize("type", + ["number", + "range", + "email", + "password", + "search", + "tel", + "text", + "url", + "color", + "date", + "datetime", + "datetime-local", + "time", + "month", + "week", + "file"]) +def test_input_readonly(session, inline, type): + session.url = inline("" % type) + element = session.find.css("input", all=False) + + response = element_clear(session, element) + assert_error(response, "invalid element state") + + +def test_textarea(session, inline, add_event_listeners, tracked_events): + session.url = inline("") + element = session.find.css("textarea", all=False) + add_event_listeners(element, tracked_events) + assert element.property("value") == "foobar" + + response = element_clear(session, element) + assert_success(response) + assert element.property("value") == "" + assert_in_events(session, ["focus", "change", "blur"]) + + +def test_textarea_disabled(session, inline): + session.url = inline("") + element = session.find.css("textarea", all=False) + + response = element_clear(session, element) + assert_error(response, "invalid element state") + + +def test_textarea_readonly(session, inline): + session.url = inline("") + element = session.find.css("textarea", all=False) + + response = element_clear(session, element) + assert_error(response, "invalid element state") + + +def test_input_file(session, text_file, inline): + session.url = inline("") + element = session.find.css("input", all=False) + element.send_keys(str(text_file)) + + response = element_clear(session, element) + assert_success(response) + assert element.property("value") == "" + + +def test_input_file_multiple(session, text_file, inline): + session.url = inline("") + element = session.find.css("input", all=False) + element.send_keys(str(text_file)) + element.send_keys(str(text_file)) + + response = 
element_clear(session, element) + assert_success(response) + assert element.property("value") == "" + + +def test_select(session, inline): + session.url = inline(""" + + """) + select = session.find.css("select", all=False) + option = session.find.css("option", all=False) + + response = element_clear(session, select) + assert_error(response, "invalid element state") + response = element_clear(session, option) + assert_error(response, "invalid element state") + + +def test_button(session, inline): + session.url = inline("") + button = session.find.css("button", all=False) + + response = element_clear(session, button) + assert_error(response, "invalid element state") + + +def test_button_with_subtree(session, inline): + """ + Elements inside button elements are interactable. + """ + session.url = inline(""" + + """) + text_field = session.find.css("input", all=False) + + response = element_clear(session, text_field) + assert_success(response) + + +def test_contenteditable(session, inline, add_event_listeners, tracked_events): + session.url = inline("

foobar

") + element = session.find.css("p", all=False) + add_event_listeners(element, tracked_events) + assert element.property("innerHTML") == "foobar" + + response = element_clear(session, element) + assert_success(response) + assert element.property("innerHTML") == "" + assert_events_equal(session, ["focus", "blur"]) + assert_element_has_focus(session.execute_script("return document.body")) + + +def test_designmode(session, inline): + session.url = inline("foobar") + element = session.find.css("body", all=False) + assert element.property("innerHTML") == "foobar" + session.execute_script("document.designMode = 'on'") + + response = element_clear(session, element) + assert_success(response) + assert element.property("innerHTML") in ["", "
"] + assert_element_has_focus(session.execute_script("return document.body")) + + +def test_resettable_element_focus_when_empty(session, inline, add_event_listeners, tracked_events): + session.url = inline("") + element = session.find.css("input", all=False) + add_event_listeners(element, tracked_events) + assert element.property("value") == "" + + response = element_clear(session, element) + assert_success(response) + assert element.property("value") == "" + assert_events_equal(session, []) + + +@pytest.mark.parametrize("type,invalid_value", + [("number", "foo"), + ("range", "foo"), + ("email", "foo"), + ("url", "foo"), + ("color", "foo"), + ("date", "foo"), + ("datetime", "foo"), + ("datetime-local", "foo"), + ("time", "foo"), + ("month", "foo"), + ("week", "foo")]) +def test_resettable_element_does_not_satisfy_validation_constraints(session, inline, type, invalid_value): + """ + Some UAs allow invalid input to certain types of constrained + form controls. For example, Gecko allows non-valid characters + to be typed into but Chrome does not. + Since we want to test that Element Clear works for clearing the + invalid characters in these UAs, it is fine to skip this test + where UAs do not allow the element to not satisfy its constraints. + """ + session.url = inline("" % type) + element = session.find.css("input", all=False) + + def is_valid(element): + return session.execute_script(""" + var input = arguments[0]; + return input.validity.valid; + """, args=(element,)) + + # value property does not get updated if the input is invalid + element.send_keys(invalid_value) + + # UA does not allow invalid input for this form control type + if is_valid(element): + return + + response = element_clear(session, element) + assert_success(response) + assert is_valid(element) + + +@pytest.mark.parametrize("type", + ["checkbox", + "radio", + "hidden", + "submit", + "button", + "image"]) +def test_non_editable_inputs(session, inline, type): + session.url = inline("" % type) + element = session.find.css("input", all=False) + + response = element_clear(session, element) + assert_error(response, "invalid element state") + + +def test_scroll_into_view(session, inline): + session.url = inline(""" + +
+ """) + element = session.find.css("input", all=False) + assert element.property("value") == "foobar" + assert session.execute_script("return window.pageYOffset") == 0 + + # scroll to the bottom right of the page + session.execute_script(""" + var body = document.body; + window.scrollTo(body.scrollWidth, body.scrollHeight); + """) + + # clear and scroll back to the top of the page + response = element_clear(session, element) + assert_success(response) + assert element.property("value") == "" + + # check if element cleared is scrolled into view + rect = session.execute_script(""" + var input = arguments[0]; + var rect = input.getBoundingClientRect(); + return {"top": rect.top, + "left": rect.left, + "height": rect.height, + "width": rect.width}; + """, args=(element,)) + window = session.execute_script(""" + return {"innerHeight": window.innerHeight, + "innerWidth": window.innerWidth, + "pageXOffset": window.pageXOffset, + "pageYOffset": window.pageYOffset}; + """) + + assert rect["top"] < (window["innerHeight"] + window["pageYOffset"]) and \ + rect["left"] < (window["innerWidth"] + window["pageXOffset"]) and \ + (rect["top"] + element.rect["height"]) > window["pageYOffset"] and \ + (rect["left"] + element.rect["width"]) > window["pageXOffset"] diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_clear/user_prompts.py b/testing/web-platform/tests/webdriver/tests/classic/element_clear/user_prompts.py new file mode 100644 index 0000000000..7a8564a684 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/element_clear/user_prompts.py @@ -0,0 +1,131 @@ +# META: timeout=long + +import pytest + +from tests.support.asserts import assert_dialog_handled, assert_error, assert_success + + +def element_clear(session, element): + return session.transport.send( + "POST", "/session/{session_id}/element/{element_id}/clear".format( + session_id=session.session_id, + element_id=element.id)) + + +@pytest.fixture +def check_user_prompt_closed_without_exception(session, create_dialog, inline): + def check_user_prompt_closed_without_exception(dialog_type, retval): + session.url = inline("") + element = session.find.css("input", all=False) + element.send_keys("foo") + + assert element.property("value") == "foo" + + create_dialog(dialog_type, text=dialog_type) + + response = element_clear(session, element) + assert_success(response) + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert element.property("value") == "" + + return check_user_prompt_closed_without_exception + + +@pytest.fixture +def check_user_prompt_closed_with_exception(session, create_dialog, inline): + def check_user_prompt_closed_with_exception(dialog_type, retval): + session.url = inline("") + element = session.find.css("input", all=False) + element.send_keys("foo") + + assert element.property("value") == "foo" + + create_dialog(dialog_type, text=dialog_type) + + response = element_clear(session, element) + assert_error(response, "unexpected alert open") + + assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval) + + assert element.property("value") == "foo" + + return check_user_prompt_closed_with_exception + + +@pytest.fixture +def check_user_prompt_not_closed_but_exception(session, create_dialog, inline): + def check_user_prompt_not_closed_but_exception(dialog_type): + session.url = inline("") + element = session.find.css("input", all=False) + element.send_keys("foo") + + assert element.property("value") == "foo" + + 
create_dialog(dialog_type, text=dialog_type) + + response = element_clear(session, element) + assert_error(response, "unexpected alert open") + + assert session.alert.text == dialog_type + session.alert.dismiss() + + assert element.property("value") == "foo" + + return check_user_prompt_not_closed_but_exception + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", True), + ("prompt", ""), +]) +def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval): + check_user_prompt_closed_without_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", True), + ("prompt", ""), +]) +def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval): + check_user_prompt_closed_without_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"}) +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) + + +@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"}) +@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"]) +def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type): + check_user_prompt_not_closed_but_exception(dialog_type) + + +@pytest.mark.parametrize("dialog_type, retval", [ + ("alert", None), + ("confirm", False), + ("prompt", None), +]) +def test_default(check_user_prompt_closed_with_exception, dialog_type, retval): + check_user_prompt_closed_with_exception(dialog_type, retval) diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/__init__.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/bubbling.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/bubbling.py new file mode 100644 index 0000000000..7620ec3224 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/element_click/bubbling.py @@ -0,0 +1,157 @@ +from tests.support.asserts import assert_success + + +def element_click(session, element): + return session.transport.send( + "POST", "/session/{session_id}/element/{element_id}/click".format( + session_id=session.session_id, + element_id=element.id)) + + +def test_click_event_bubbles_to_parents(session, inline): + session.url = inline(""" + + +
THREE +
TWO +
ONE
+
+
+ + + """) + three, two, one = session.find.css("div") + one.click() + + clicks = session.execute_script("return window.clicks") + assert one in clicks + assert two in clicks + assert three in clicks + + +def test_spin_event_loop(session, inline): + """ + Wait until the user agent event loop has spun enough times to + process the DOM events generated by clicking. + """ + session.url = inline(""" + + +
THREE +
TWO +
ONE
+
+
+ + + """) + three, two, one = session.find.css("div") + one.click() + + delayed_clicks = session.execute_script("return window.delayedClicks") + assert one in delayed_clicks + assert two in delayed_clicks + assert three in delayed_clicks + + +def test_element_disappears_during_click(session, inline): + """ + When an element in the event bubbling order disappears (its CSS + display style is set to "none") during a click, Gecko and Blink + exhibit different behaviour. Whilst Chrome fires a "click" + DOM event on , Firefox does not. + + A WebDriver implementation may choose to wait for this event to let + the event loops spin enough times to let click events propagate, + so this is a corner case test that Firefox does not hang indefinitely. + """ + session.url = inline(""" + + + +
+
+ +
+ + + + """) + over = session.find.css("#over", all=False) + + # should not time out + response = element_click(session, over) + assert_success(response) diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/center_point.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/center_point.py new file mode 100644 index 0000000000..eb5cc19f14 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/element_click/center_point.py @@ -0,0 +1,64 @@ +import pytest + +from tests.support.asserts import assert_error, assert_success +from tests.support.helpers import center_point + + +def element_click(session, element): + return session.transport.send( + "POST", "session/{session_id}/element/{element_id}/click".format( + session_id=session.session_id, + element_id=element.id)) + + +def square(inline, size): + return inline(""" + + +
+ + + """.format(size=size)) + + +def assert_one_click(session): + """Asserts there has only been one click, and returns that.""" + clicks = session.execute_script("return window.clicks") + assert len(clicks) == 1 + return tuple(clicks[0]) + + +def test_entirely_in_view(session, inline): + session.url = square(inline, 300) + element = session.find.css("#target", all=False) + + response = element_click(session, element) + assert_success(response) + + click_point = assert_one_click(session) + assert click_point == (150, 150) + + +@pytest.mark.parametrize("size", range(1, 11)) +def test_css_pixel_rounding(session, inline, size): + session.url = square(inline, size) + element = session.find.css("#target", all=False) + expected_click_point = center_point(element) + + response = element_click(session, element) + assert_success(response) + + actual_click_point = assert_one_click(session) + assert actual_click_point == expected_click_point diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/click.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/click.py new file mode 100644 index 0000000000..3c3f7d70e6 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/element_click/click.py @@ -0,0 +1,99 @@ +import pytest +from webdriver import Element + +from tests.support.asserts import assert_error, assert_success + + +def element_click(session, element): + return session.transport.send( + "POST", "session/{session_id}/element/{element_id}/click".format( + session_id=session.session_id, + element_id=element.id)) + + +def test_null_response_value(session, inline): + session.url = inline("

foo") + element = session.find.css("p", all=False) + + response = element_click(session, element) + value = assert_success(response) + assert value is None + + +def test_no_top_browsing_context(session, closed_window): + element = Element(session, "foo") + response = element_click(session, element) + assert_error(response, "no such window") + + original_handle, element = closed_window + response = element_click(session, element) + assert_error(response, "no such window") + + session.window_handle = original_handle + response = element_click(session, element) + assert_error(response, "no such element") + + +def test_no_browsing_context(session, closed_frame): + element = Element(session, "foo") + + response = element_click(session, element) + assert_error(response, "no such window") + + +def test_no_such_element_with_invalid_value(session): + element = Element(session, "foo") + + response = element_click(session, element) + assert_error(response, "no such element") + + +def test_no_such_element_with_shadow_root(session, get_test_page): + session.url = get_test_page() + + element = session.find.css("custom-element", all=False) + + result = element_click(session, element.shadow_root) + assert_error(result, "no such element") + + +@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"]) +def test_no_such_element_from_other_window_handle(session, inline, closed): + session.url = inline("

") + element = session.find.css("#parent", all=False) + + new_handle = session.new_window() + + if closed: + session.window.close() + + session.window_handle = new_handle + + response = element_click(session, element) + assert_error(response, "no such element") + + +@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"]) +def test_no_such_element_from_other_frame(session, get_test_page, closed): + session.url = get_test_page(as_frame=True) + + frame = session.find.css("iframe", all=False) + session.switch_frame(frame) + + element = session.find.css("input#text", all=False) + + session.switch_frame("parent") + + if closed: + session.execute_script("arguments[0].remove();", args=[frame]) + + response = element_click(session, element) + assert_error(response, "no such element") + + +@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"]) +def test_stale_element_reference(session, stale_element, as_frame): + element = stale_element("input#text", as_frame=as_frame) + + response = element_click(session, element) + assert_error(response, "stale element reference") diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/events.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/events.py new file mode 100644 index 0000000000..30f2dfa0a4 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/element_click/events.py @@ -0,0 +1,35 @@ +from webdriver import Element +from tests.support.asserts import assert_success +from tests.support.helpers import filter_dict + +def get_events(session): + """Return list of mouse events recorded in the fixture.""" + return session.execute_script("return allEvents.events;") or [] + +def element_click(session, element): + return session.transport.send( + "POST", "session/{session_id}/element/{element_id}/click".format( + session_id=session.session_id, + element_id=element.id)) + +def test_event_mousemove(session, url): + session.url = url( + "/webdriver/tests/classic/element_click/support/test_click_wdspec.html" + ) + + element = session.find.css('#outer', all=False) + response = element_click(session, element) + assert_success(response) + + events = get_events(session) + assert len(events) == 4 + + expected = [ + {"type": "mousemove", "buttons": 0, "button": 0}, + {"type": "mousedown", "buttons": 1, "button": 0}, + {"type": "mouseup", "buttons": 0, "button": 0}, + {"type": "click", "buttons": 0, "button": 0}, + ] + filtered_events = [filter_dict(e, expected[0]) for e in events] + + assert expected == filtered_events diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/file_upload.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/file_upload.py new file mode 100644 index 0000000000..73832d0f85 --- /dev/null +++ b/testing/web-platform/tests/webdriver/tests/classic/element_click/file_upload.py @@ -0,0 +1,16 @@ +from tests.support.asserts import assert_error, assert_success + + +def element_click(session, element): + return session.transport.send( + "POST", "session/{session_id}/element/{element_id}/click".format( + session_id=session.session_id, + element_id=element.id)) + + +def test_file_upload_state(session,inline): + session.url = inline("") + + element = session.find.css("input", all=False) + response = element_click(session, element) + assert_error(response, "invalid argument") diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/interactability.py 
diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/interactability.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/interactability.py
new file mode 100644
index 0000000000..d55860c874
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/classic/element_click/interactability.py
@@ -0,0 +1,130 @@
+import pytest
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def element_click(session, element):
+    return session.transport.send(
+        "POST", "session/{session_id}/element/{element_id}/click".format(
+            session_id=session.session_id,
+            element_id=element.id))
+
+
+def test_display_none(session, inline):
+    session.url = inline("""""")
+    element = session.find.css("button", all=False)
+
+    response = element_click(session, element)
+    assert_error(response, "element not interactable")
+
+
+def test_visibility_hidden(session, inline):
+    session.url = inline("""""")
+    element = session.find.css("button", all=False)
+
+    response = element_click(session, element)
+    assert_error(response, "element not interactable")
+
+
+def test_hidden(session, inline):
+    session.url = inline("")
+    element = session.find.css("button", all=False)
+
+    response = element_click(session, element)
+    assert_error(response, "element not interactable")
+
+
+def test_disabled(session, inline):
+    session.url = inline("""""")
+    element = session.find.css("button", all=False)
+
+    response = element_click(session, element)
+    assert_success(response)
+
+
+@pytest.mark.parametrize("transform", ["translate(-100px, -100px)", "rotate(50deg)"])
+def test_element_not_interactable_css_transform(session, inline, transform):
+    session.url = inline("""
+

+
+
+        """.format(transform=transform))
+    element = session.find.css("input", all=False)
+    response = element_click(session, element)
+    assert_error(response, "element not interactable")
+
+
+def test_element_not_interactable_out_of_view(session, inline):
+    session.url = inline("""
+
+
+
+        """)
+    element = session.find.css("input", all=False)
+    response = element_click(session, element)
+    assert_error(response, "element not interactable")
+
+
+@pytest.mark.parametrize("tag_name", ["div", "span"])
+def test_zero_sized_element(session, inline, tag_name):
+    session.url = inline("<{0}>".format(tag_name))
+    element = session.find.css(tag_name, all=False)
+
+    response = element_click(session, element)
+    assert_error(response, "element not interactable")
+
+
+def test_element_intercepted(session, inline):
+    session.url = inline("""
+
+
+
+        """)
+    element = session.find.css("input", all=False)
+    response = element_click(session, element)
+    assert_error(response, "element click intercepted")
+
+
+def test_element_intercepted_no_pointer_events(session, inline):
+    session.url = inline("""""")
+    element = session.find.css("input", all=False)
+    response = element_click(session, element)
+    assert_error(response, "element click intercepted")
+
+
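The two "element click intercepted" tests above are a different failure mode: the input does have an in-view centre point, but another element is the hit target there (unless that overlay opts out with pointer-events: none). An illustrative way to probe that condition with `document.elementFromPoint`; again, the helper name is made up for this sketch and is not a support-library function:

    def element_would_receive_click(session, element):
        # True when the element itself (or one of its descendants) is hit at its
        # centre point; an overlapping element that still receives pointer events
        # makes this False, which is when the click is reported as intercepted.
        return session.execute_script(
            """
            const target = arguments[0];
            const rect = target.getBoundingClientRect();
            const hit = document.elementFromPoint(rect.left + rect.width / 2,
                                                  rect.top + rect.height / 2);
            return hit === target || target.contains(hit);
            """,
            args=(element,))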
+def test_element_not_visible_overflow_hidden(session, inline):
+    session.url = inline("""
+
+
+        """)
+    element = session.find.css("input", all=False)
+    response = element_click(session, element)
+    assert_error(response, "element not interactable")
diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/navigate.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/navigate.py
new file mode 100644
index 0000000000..987d4686f0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/classic/element_click/navigate.py
@@ -0,0 +1,198 @@
+import pytest
+from webdriver import error
+
+from tests.support.asserts import assert_success
+from tests.support.helpers import wait_for_new_handle
+from tests.support.sync import Poll
+
+
+def element_click(session, element):
+    return session.transport.send(
+        "POST", "session/{session_id}/element/{element_id}/click".format(
+            session_id=session.session_id,
+            element_id=element.id))
+
+
+def test_numbers_link(session, server_config, inline):
+    link = "/webdriver/tests/classic/element_click/support/input.html"
+    session.url = inline("123456".format(url=link))
+    element = session.find.css("a", all=False)
+    response = element_click(session, element)
+    assert_success(response)
+    host = server_config["browser_host"]
+    port = server_config["ports"]["http"][0]
+
+    assert session.url == "http://{host}:{port}{url}".format(host=host, port=port, url=link)
+
+
+def test_multi_line_link(session, server_config, inline):
+    link = "/webdriver/tests/classic/element_click/support/input.html"
+    session.url = inline("""
+

+        Helloooooooooooooooooooo Worlddddddddddddddd
+
+    """.format(url=link))
+    element = session.find.css("a", all=False)
+    response = element_click(session, element)
+    assert_success(response)
+    host = server_config["browser_host"]
+    port = server_config["ports"]["http"][0]
+
+    assert session.url == "http://{host}:{port}{url}".format(host=host, port=port, url=link)
+
+
+def test_link_unload_event(session, server_config, inline):
+    link = "/webdriver/tests/classic/element_click/support/input.html"
+    session.url = inline("""
+
+        click here
+
+
+    """.format(url=link))
+
+    element = session.find.css("a", all=False)
+    response = element_click(session, element)
+    assert_success(response)
+
+    host = server_config["browser_host"]
+    port = server_config["ports"]["http"][0]
+    assert session.url == "http://{host}:{port}{url}".format(host=host, port=port, url=link)
+
+    session.back()
+
+    element = session.find.css("input", all=False)
+    response = session.execute_script("""
+        let input = arguments[0];
+        return input.checked;
+    """, args=(element,))
+    assert response is True
+
+
+def test_link_hash(session, inline):
+    id = "anchor"
+    session.url = inline("""
+        aaaa
+

scroll here

+    """.format(url=id, id=id))
+    old_url = session.url
+
+    element = session.find.css("a", all=False)
+    response = element_click(session, element)
+    assert_success(response)
+
+    new_url = session.url
+    assert "{url}#{id}".format(url=old_url, id=id) == new_url
+
+    element = session.find.css("p", all=False)
+    assert session.execute_script("""
+        let input = arguments[0];
+        rect = input.getBoundingClientRect();
+        return rect["top"] >= 0 && rect["left"] >= 0 &&
+            (rect["top"] + rect["height"]) <= window.innerHeight &&
+            (rect["left"] + rect["width"]) <= window.innerWidth;
+        """, args=(element,)) is True
+
+
+@pytest.mark.parametrize("target", [
+    "",
+    "_blank",
+    "_parent",
+    "_self",
+    "_top",
+])
+def test_link_from_toplevel_context_with_target(session, inline, target):
+    target_page = inline("

foo

")
+
+    session.url = inline("click".format(target_page, target))
+    element = session.find.css("a", all=False)
+
+    orig_handles = session.handles
+
+    response = element_click(session, element)
+    assert_success(response)
+
+    if target == "_blank":
+        session.window_handle = wait_for_new_handle(session, orig_handles)
+
+    wait = Poll(
+        session,
+        timeout=5,
+        ignored_exceptions=error.NoSuchElementException,
+        message="Expected element has not been found")
+    wait.until(lambda s: s.find.css("#foo"))
+
+
+@pytest.mark.parametrize("target", [
+    "",
+    "_blank",
+    "_parent",
+    "_self",
+    "_top",
+])
+def test_link_from_nested_context_with_target(session, inline, iframe, target):
+    target_page = inline("

foo

")
+
+    session.url = inline(iframe("click".format(target_page, target)))
+    frame = session.find.css("iframe", all=False)
+    session.switch_frame(frame)
+    element = session.find.css("a".format(target), all=False)
+
+    orig_handles = session.handles
+
+    response = element_click(session, element)
+    assert_success(response)
+
+    if target == "_blank":
+        session.window_handle = wait_for_new_handle(session, orig_handles)
+
+    # With the current browsing context removed the navigation should
+    # not time out. Switch to the target context, and wait until the expected
+    # element is available.
+    if target == "_parent":
+        session.switch_frame("parent")
+    elif target == "_top":
+        session.switch_frame(None)
+
+    wait = Poll(
+        session,
+        timeout=5,
+        ignored_exceptions=error.NoSuchElementException,
+        message="Expected element has not been found")
+    wait.until(lambda s: s.find.css("#foo"))
+
+
+# Capability needed as long as no valid certificate is available:
+# https://github.com/web-platform-tests/wpt/issues/28847
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+def test_link_cross_origin(session, inline, url):
+    base_path = ("/webdriver/tests/support/html/subframe.html" +
+                 "?pipe=header(Cross-Origin-Opener-Policy,same-origin)")
+    target_page = url(base_path, protocol="https", domain="alt")
+
+    session.url = inline("click me".format(target_page), protocol="https")
+    link = session.find.css("a", all=False)
+
+    response = element_click(session, link)
+    assert_success(response)
+
+    assert session.url == target_page
+    with pytest.raises(error.StaleElementReferenceException):
+        link.click()
+
+    session.find.css("#delete", all=False)
+
+
+def test_link_closes_window(session, inline):
+    new_handle = session.new_window()
+    session.window_handle = new_handle
+
+    session.url = inline("""Close me""")
+    element = session.find.css("a", all=False)
+
+    response = element_click(session, element)
+    assert_success(response)
+
+    assert new_handle not in session.handles
diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/scroll_into_view.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/scroll_into_view.py
new file mode 100644
index 0000000000..c2dc648528
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/classic/element_click/scroll_into_view.py
@@ -0,0 +1,72 @@
+import pytest
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import center_point
+
+
+def element_click(session, element):
+    return session.transport.send(
+        "POST", "session/{session_id}/element/{element_id}/click".format(
+            session_id=session.session_id,
+            element_id=element.id))
+
+
+def assert_one_click(session):
+    """Asserts there has only been one click, and returns that."""
+    clicks = session.execute_script("return window.clicks")
+    assert len(clicks) == 1
+    return tuple(clicks[0])
+
+
+def test_scroll_into_view(session, inline):
+    session.url = inline("""
+        """)
+
+    element = session.find.css("input", all=False)
+    response = element_click(session, element)
+    assert_success(response)
+
+    # Check that the clicked element has been scrolled into view
+    assert session.execute_script("""
+        let input = arguments[0];
+        rect = input.getBoundingClientRect();
+        return rect["top"] >= 0 && rect["left"] >= 0 &&
+            (rect["top"] + rect["height"]) <= window.innerHeight &&
+            (rect["left"] + rect["width"]) <= window.innerWidth;
+        """, args=(element,)) is True
+
+
+@pytest.mark.parametrize("offset", range(9, 0, -1))
+def test_partially_visible_does_not_scroll(session, offset, inline):
+    session.url = inline("""
+
+
+
+
+        """.format(offset=offset))
+    target = session.find.css("div", all=False)
+    assert session.execute_script("return window.scrollY || document.documentElement.scrollTop") == 0
+    response = element_click(session, target)
+    assert_success(response)
+    assert session.execute_script("return window.scrollY || document.documentElement.scrollTop") == 0
+    click_point = assert_one_click(session)
+    assert click_point == center_point(target)
diff --git a/testing/web-platform/tests/webdriver/tests/classic/element_click/select.py b/testing/web-platform/tests/webdriver/tests/classic/element_click/select.py
new file mode 100644
index 0000000000..62d40755b5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/classic/element_click/select.py
@@ -0,0 +1,223 @@
+def test_click_option(session, inline):
+    session.url = inline("""
+        """)
+    options = session.find.css("option")
+
+    assert options[0].selected
+    assert not options[1].selected
+
+    options[1].click()
+    assert options[1].selected
+    assert not options[0].selected
+
+
+def test_click_multiple_option(session, inline):
+    session.url = inline("""
+        """)
+    options = session.find.css("option")
+
+    assert not options[0].selected
+    assert not options[1].selected
+
+    options[0].click()
+    assert options[0].selected
+    assert not options[1].selected
+
+
+def test_click_preselected_option(session, inline):
+    session.url = inline("""
+        """)
+    options = session.find.css("option")
+
+    assert not options[0].selected
+    assert options[1].selected
+
+    options[1].click()
+    assert options[1].selected
+    assert not options[0].selected
+
+    options[0].click()
+    assert options[0].selected
+    assert not options[1].selected
+
+
+def test_click_preselected_multiple_option(session, inline):
+    session.url = inline("""
+        """)
+    options = session.find.css("option")
+
+    assert not options[0].selected
+    assert options[1].selected
+
+    options[1].click()
+    assert not options[1].selected
+    assert not options[0].selected
+
+    options[0].click()
+    assert options[0].selected
+    assert not options[1].selected
+
+
+def test_click_deselects_others(session, inline):
+    session.url = inline("""
+        """)
+    options = session.find.css("option")
+
+    options[0].click()
+    assert options[0].selected
+    options[1].click()
+    assert options[1].selected
+    options[2].click()
+    assert options[2].selected
+    options[0].click()
+    assert options[0].selected
+
+
+def test_click_multiple_does_not_deselect_others(session, inline):
+    session.url = inline("""
+        """)
+    options = session.find.css("option")
+
+    options[0].click()
+    assert options[0].selected
+    options[1].click()
+    assert options[0].selected
+    assert options[1].selected
+    options[2].click()
+    assert options[0].selected
+    assert options[1].selected
+    assert options[2].selected
+
+
+def test_click_selected_option(session, inline):
+    session.url = inline("""
+        """)
+    options = session.find.css("option")
+
+    # First