summaryrefslogtreecommitdiffstats
path: root/testing/web-platform/tests/webdriver
diff options
context:
space:
mode:
Diffstat (limited to 'testing/web-platform/tests/webdriver')
-rw-r--r--testing/web-platform/tests/webdriver/META.yml9
-rw-r--r--testing/web-platform/tests/webdriver/README.md17
-rw-r--r--testing/web-platform/tests/webdriver/tests/__init__.py4
-rw-r--r--testing/web-platform/tests/webdriver/tests/accept_alert/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/accept_alert/accept.py110
-rw-r--r--testing/web-platform/tests/webdriver/tests/add_cookie/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/add_cookie/add.py286
-rw-r--r--testing/web-platform/tests/webdriver/tests/add_cookie/user_prompts.py137
-rw-r--r--testing/web-platform/tests/webdriver/tests/back/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/back/back.py169
-rw-r--r--testing/web-platform/tests/webdriver/tests/back/conftest.py19
-rw-r--r--testing/web-platform/tests/webdriver/tests/back/user_prompts.py118
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/__init__.py77
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/__init__.py65
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/__init__.py94
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/capture_screenshot.py42
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/frame.py61
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/invalid.py26
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/window_handle.py7
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/close.py23
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/invalid.py31
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/context_created.py225
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/invalid.py53
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/reference_context.py46
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/type.py41
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/dom_content_loaded.py121
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/frames.py90
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/invalid.py27
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/max_depth.py121
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/root.py113
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/load.py110
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/__init__.py25
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/about_blank.py33
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/data_url.py101
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/error.py22
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/frame.py59
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/hash.py62
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/image.py56
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/invalid.py52
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/navigate.py83
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.pngbin0 -> 70 bytes
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.html0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.js1
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg2
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.html0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.svg3
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.pngbin0 -> 95 bytes
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/wait.py98
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/conftest.py48
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/errors/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/errors/errors.py16
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/log/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/__init__.py129
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console.py170
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console_args.py48
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/event_buffer.py96
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/javascript.py31
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/stacktrace.py121
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/subscription.py108
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/__init__.py230
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/before_request_sent_tentative.py295
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/combined/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/combined/network_events_tentative.py124
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/conftest.py90
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed_tentative.py274
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/response_started/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started_tentative.py250
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.html2
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.js1
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.pngbin0 -> 72 bytes
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.svg1
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.txt1
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/support/redirect_http_equiv.html4
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/network/support/redirected.html2
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/__init__.py50
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/arguments.py385
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/await_promise.py48
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/exception_details.py182
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/function_declaration.py14
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/internal_id.py67
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid.py123
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid_tentative.py38
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/realm.py71
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result.py160
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_node.py596
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_ownership.py60
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/sandbox.py204
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/strict_mode.py38
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/call_function/this.py85
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/conftest.py62
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/disown/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/disown/handles.py173
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid.py68
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid_tentative.py35
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/disown/target.py95
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/await_promise.py217
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/evaluate.py95
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/exception_details.py183
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/internal_id.py65
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid.py86
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid_tentative.py38
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result.py140
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_node.py586
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_ownership.py60
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/sandbox.py182
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/strict_mode.py34
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/context.py70
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/get_realms.py183
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/invalid.py26
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/sandbox.py238
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/type.py34
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/new/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/new/connect.py34
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/status/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/status/status.py11
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/contexts.py277
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/events.py138
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/invalid.py153
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/contexts.py167
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/events.py83
-rw-r--r--testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/invalid.py234
-rw-r--r--testing/web-platform/tests/webdriver/tests/close_window/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/close_window/close.py102
-rw-r--r--testing/web-platform/tests/webdriver/tests/close_window/user_prompts.py119
-rw-r--r--testing/web-platform/tests/webdriver/tests/conftest.py5
-rw-r--r--testing/web-platform/tests/webdriver/tests/delete_all_cookies/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/delete_all_cookies/delete.py22
-rw-r--r--testing/web-platform/tests/webdriver/tests/delete_all_cookies/user_prompts.py119
-rw-r--r--testing/web-platform/tests/webdriver/tests/delete_cookie/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/delete_cookie/delete.py29
-rw-r--r--testing/web-platform/tests/webdriver/tests/delete_cookie/user_prompts.py119
-rw-r--r--testing/web-platform/tests/webdriver/tests/delete_session/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/delete_session/delete.py42
-rw-r--r--testing/web-platform/tests/webdriver/tests/dismiss_alert/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/dismiss_alert/dismiss.py109
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_clear/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_clear/clear.py444
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_clear/user_prompts.py131
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/bubbling.py157
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/center_point.py64
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/click.py89
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/events.py33
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/file_upload.py16
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/interactability.py130
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/navigate.py197
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/scroll_into_view.py72
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/select.py223
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/shadow_dom.py101
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/support/input.html3
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/support/test_click_wdspec.html100
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_click/user_prompts.py122
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/__init__.py2
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/conftest.py17
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/content_editable.py30
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/events.py85
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/file_upload.py262
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/form_controls.py102
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/interactability.py142
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/scroll_into_view.py40
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/send_keys.py111
-rw-r--r--testing/web-platform/tests/webdriver/tests/element_send_keys/user_prompts.py123
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/__init__.py16
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/arguments.py98
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/collections.py150
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/cyclic.py78
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/execute_async.py105
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/node.py87
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/objects.py49
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/promise.py118
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/properties.py64
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_async_script/user_prompts.py109
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/__init__.py16
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/arguments.py91
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/collections.py128
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/cyclic.py78
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/execute.py134
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/json_serialize_windowproxy.py51
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/node.py86
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/objects.py49
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/promise.py102
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/properties.py60
-rw-r--r--testing/web-platform/tests/webdriver/tests/execute_script/user_prompts.py107
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element/find.py142
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element/user_prompts.py120
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element_from_element/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element_from_element/find.py191
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element_from_element/user_prompts.py125
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/conftest.py19
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/find.py193
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/user_prompts.py129
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements/find.py162
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements/user_prompts.py122
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements_from_element/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements_from_element/find.py210
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements_from_element/user_prompts.py127
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/conftest.py19
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/find.py203
-rw-r--r--testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/user_prompts.py131
-rw-r--r--testing/web-platform/tests/webdriver/tests/forward/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/forward/conftest.py19
-rw-r--r--testing/web-platform/tests/webdriver/tests/forward/forward.py195
-rw-r--r--testing/web-platform/tests/webdriver/tests/forward/user_prompts.py121
-rw-r--r--testing/web-platform/tests/webdriver/tests/fullscreen_window/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/fullscreen_window/fullscreen.py53
-rw-r--r--testing/web-platform/tests/webdriver/tests/fullscreen_window/stress.py19
-rw-r--r--testing/web-platform/tests/webdriver/tests/fullscreen_window/user_prompts.py116
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_active_element/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_active_element/get.py132
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_active_element/user_prompts.py118
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_alert_text/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_alert_text/get.py70
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_computed_label/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_computed_label/get.py79
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_computed_role/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_computed_role/get.py77
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_current_url/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_current_url/file.py23
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_current_url/get.py74
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_current_url/iframe.py75
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_current_url/user_prompts.py111
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_attribute/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_attribute/get.py156
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_attribute/user_prompts.py117
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_css_value/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_css_value/get.py97
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_css_value/user_prompts.py120
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_property/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_property/get.py209
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_property/user_prompts.py115
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_rect/__init__.py1
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_rect/get.py89
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_rect/user_prompts.py120
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_shadow_root/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_shadow_root/conftest.py22
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_shadow_root/get.py96
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_shadow_root/user_prompts.py117
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_tag_name/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_tag_name/get.py85
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_tag_name/user_prompts.py114
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_text/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_text/get.py99
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_element_text/user_prompts.py116
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_named_cookie/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_named_cookie/get.py145
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_named_cookie/user_prompts.py118
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_page_source/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_page_source/source.py25
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_page_source/user_prompts.py112
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_timeouts/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_timeouts/get.py43
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_title/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_title/get.py56
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_title/iframe.py80
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_title/user_prompts.py134
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_window_handle/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_window_handle/get.py43
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_window_handle/user_prompts.py61
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_window_handles/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_window_handles/get.py37
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_window_handles/user_prompts.py61
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_window_rect/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_window_rect/get.py31
-rw-r--r--testing/web-platform/tests/webdriver/tests/get_window_rect/user_prompts.py113
-rw-r--r--testing/web-platform/tests/webdriver/tests/idlharness.window.js16
-rw-r--r--testing/web-platform/tests/webdriver/tests/interface/interface.py2
-rw-r--r--testing/web-platform/tests/webdriver/tests/is_element_enabled/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/is_element_enabled/enabled.py161
-rw-r--r--testing/web-platform/tests/webdriver/tests/is_element_enabled/user_prompts.py119
-rw-r--r--testing/web-platform/tests/webdriver/tests/is_element_selected/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/is_element_selected/selected.py128
-rw-r--r--testing/web-platform/tests/webdriver/tests/is_element_selected/user_prompts.py117
-rw-r--r--testing/web-platform/tests/webdriver/tests/maximize_window/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/maximize_window/maximize.py100
-rw-r--r--testing/web-platform/tests/webdriver/tests/maximize_window/stress.py43
-rw-r--r--testing/web-platform/tests/webdriver/tests/maximize_window/user_prompts.py117
-rw-r--r--testing/web-platform/tests/webdriver/tests/minimize_window/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/minimize_window/minimize.py69
-rw-r--r--testing/web-platform/tests/webdriver/tests/minimize_window/stress.py19
-rw-r--r--testing/web-platform/tests/webdriver/tests/minimize_window/user_prompts.py113
-rw-r--r--testing/web-platform/tests/webdriver/tests/navigate_to/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/navigate_to/file.py25
-rw-r--r--testing/web-platform/tests/webdriver/tests/navigate_to/navigate.py100
-rw-r--r--testing/web-platform/tests/webdriver/tests/navigate_to/user_prompts.py112
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/conftest.py79
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/create_alwaysMatch.py15
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/create_firstMatch.py16
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/default_values.py46
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/invalid_capabilities.py56
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/merge.py82
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/page_load_strategy.py7
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/platform_name.py11
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/response.py44
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/support/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/support/create.py136
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/timeouts.py32
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_session/websocket_url.py7
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_window/__init__.py10
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_window/new.py64
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_window/new_tab.py89
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_window/new_window.py89
-rw-r--r--testing/web-platform/tests/webdriver/tests/new_window/user_prompts.py121
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/conftest.py99
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/key.py38
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/key_events.py243
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/key_modifiers.py37
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/key_shortcuts.py49
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/key_special_keys.py38
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/none.py24
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/pointer_contextmenu.py78
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/pointer_dblclick.py33
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/pointer_modifier_click.py85
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/pointer_mouse.py194
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/pointer_origin.py131
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/pointer_pause_dblclick.py53
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/pointer_pen.py72
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/pointer_touch.py90
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/pointer_tripleclick.py36
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/sequence.py9
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/support/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/support/keys.py905
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/support/mouse.py26
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/support/refine.py29
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_pointer_wdspec.html102
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_scroll_wdspec.html103
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_wdspec.html216
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/user_prompts.py124
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/validity.py80
-rw-r--r--testing/web-platform/tests/webdriver/tests/perform_actions/wheel.py75
-rw-r--r--testing/web-platform/tests/webdriver/tests/permissions/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/permissions/set.py83
-rw-r--r--testing/web-platform/tests/webdriver/tests/print/__init__.py21
-rw-r--r--testing/web-platform/tests/webdriver/tests/print/printcmd.py142
-rw-r--r--testing/web-platform/tests/webdriver/tests/print/user_prompts.py111
-rw-r--r--testing/web-platform/tests/webdriver/tests/refresh/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/refresh/refresh.py123
-rw-r--r--testing/web-platform/tests/webdriver/tests/refresh/user_prompts.py117
-rw-r--r--testing/web-platform/tests/webdriver/tests/release_actions/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/release_actions/conftest.py40
-rw-r--r--testing/web-platform/tests/webdriver/tests/release_actions/release.py23
-rw-r--r--testing/web-platform/tests/webdriver/tests/release_actions/sequence.py83
-rw-r--r--testing/web-platform/tests/webdriver/tests/release_actions/support/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/release_actions/support/refine.py24
-rw-r--r--testing/web-platform/tests/webdriver/tests/release_actions/support/test_actions_wdspec.html197
-rw-r--r--testing/web-platform/tests/webdriver/tests/send_alert_text/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/send_alert_text/conftest.py24
-rw-r--r--testing/web-platform/tests/webdriver/tests/send_alert_text/send.py94
-rw-r--r--testing/web-platform/tests/webdriver/tests/set_timeouts/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/set_timeouts/set.py95
-rw-r--r--testing/web-platform/tests/webdriver/tests/set_timeouts/user_prompts.py62
-rw-r--r--testing/web-platform/tests/webdriver/tests/set_window_rect/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/set_window_rect/set.py403
-rw-r--r--testing/web-platform/tests/webdriver/tests/set_window_rect/user_prompts.py121
-rw-r--r--testing/web-platform/tests/webdriver/tests/status/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/status/status.py33
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/__init__.py14
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/asserts.py224
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/defaults.py6
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/fixtures.py256
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/fixtures_bidi.py116
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/fixtures_http.py176
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/helpers.py263
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/html/deleteframe.html6
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/html/frames.html16
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/html/frames_no_bfcache.html18
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/html/meta-utf8-after-1024-bytes.html17
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/html/render.html68
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/html/subframe.html16
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/http_handlers/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/http_handlers/authentication.py25
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/http_handlers/headers.py19
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/http_handlers/redirect.py19
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/http_handlers/status.py16
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/http_request.py40
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/image.py9
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/inline.py61
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/merge_dictionaries.py42
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/screenshot.py50
-rw-r--r--testing/web-platform/tests/webdriver/tests/support/sync.py276
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_frame/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_frame/cross_origin.py63
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_frame/switch.py116
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_frame/switch_number.py50
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_frame/switch_webelement.py100
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_parent_frame/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_parent_frame/switch.py85
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_window/__init__.py0
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_window/alerts.py33
-rw-r--r--testing/web-platform/tests/webdriver/tests/switch_to_window/switch.py100
-rw-r--r--testing/web-platform/tests/webdriver/tests/take_element_screenshot/__init__.py10
-rw-r--r--testing/web-platform/tests/webdriver/tests/take_element_screenshot/iframe.py121
-rw-r--r--testing/web-platform/tests/webdriver/tests/take_element_screenshot/screenshot.py90
-rw-r--r--testing/web-platform/tests/webdriver/tests/take_element_screenshot/user_prompts.py121
-rw-r--r--testing/web-platform/tests/webdriver/tests/take_screenshot/__init__.py21
-rw-r--r--testing/web-platform/tests/webdriver/tests/take_screenshot/iframe.py54
-rw-r--r--testing/web-platform/tests/webdriver/tests/take_screenshot/screenshot.py34
-rw-r--r--testing/web-platform/tests/webdriver/tests/take_screenshot/user_prompts.py113
418 files changed, 31687 insertions, 0 deletions
diff --git a/testing/web-platform/tests/webdriver/META.yml b/testing/web-platform/tests/webdriver/META.yml
new file mode 100644
index 0000000000..8475a173ab
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/META.yml
@@ -0,0 +1,9 @@
+spec: https://w3c.github.io/webdriver/
+suggested_reviewers:
+ - AutomatedTester
+ - bwalderman
+ - jgraham
+ - juliandescottes
+ - sadym-chromium
+ - shs96c
+ - whimboo
diff --git a/testing/web-platform/tests/webdriver/README.md b/testing/web-platform/tests/webdriver/README.md
new file mode 100644
index 0000000000..67bb294d6e
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/README.md
@@ -0,0 +1,17 @@
+# WebDriver specification tests
+
+Herein lies a set of conformance tests
+for the W3C web browser automation specification
+known as [WebDriver](http://w3c.github.io/webdriver/).
+The purpose of these tests is to determine implementation compliance
+so that different driver implementations can determine
+whether they meet the recognized standard.
+
+## Chapters of the Spec that still need tests
+
+We are using a [tracking spreadsheet](https://docs.google.com/spreadsheets/d/1GUK_sdY2cv59VAJNDxZQIfypnOpapSQhMjfcJ9Wc42U/edit#gid=0)
+to coordinate work on these tests. Please look there to see who
+is working on what, and which areas are currently under-tested.
+
+The spec contributors and editors can frequently be found on the W3C
+#webdriver IRC channel.
diff --git a/testing/web-platform/tests/webdriver/tests/__init__.py b/testing/web-platform/tests/webdriver/tests/__init__.py
new file mode 100644
index 0000000000..0ba172ff2e
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/__init__.py
@@ -0,0 +1,4 @@
+import pytest
+
+# Enable pytest assert introspection for assertion helper
+pytest.register_assert_rewrite('tests.support.asserts')
diff --git a/testing/web-platform/tests/webdriver/tests/accept_alert/__init__.py b/testing/web-platform/tests/webdriver/tests/accept_alert/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/accept_alert/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/accept_alert/accept.py b/testing/web-platform/tests/webdriver/tests/accept_alert/accept.py
new file mode 100644
index 0000000000..b83477e5ca
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/accept_alert/accept.py
@@ -0,0 +1,110 @@
+import pytest
+
+from webdriver.error import NoSuchAlertException
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import wait_for_new_handle
+from tests.support.sync import Poll
+
+
+def accept_alert(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/alert/accept".format(**vars(session)))
+
+
+def test_null_response_value(session, inline):
+ session.url = inline("<script>window.alert('Hello');</script>")
+
+ response = accept_alert(session)
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_level_browsing_context(session, closed_window):
+ response = accept_alert(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = accept_alert(session)
+ assert_error(response, "no such alert")
+
+
+def test_no_user_prompt(session):
+ response = accept_alert(session)
+ assert_error(response, "no such alert")
+
+
+def test_accept_alert(session, inline):
+ session.url = inline("<script>window.alert('Hello');</script>")
+
+ response = accept_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
+
+
+def test_accept_confirm(session, inline):
+ session.url = inline("<script>window.result = window.confirm('Hello');</script>")
+
+ response = accept_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
+
+ assert session.execute_script("return window.result") is True
+
+
+def test_accept_prompt(session, inline):
+ session.url = inline("""
+ <script>
+ window.result = window.prompt('Enter Your Name: ', 'Federer');
+ </script>
+ """)
+
+ response = accept_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
+
+ assert session.execute_script("return window.result") == "Federer"
+
+
+def test_unexpected_alert(session):
+ session.execute_script("window.setTimeout(function() { window.alert('Hello'); }, 100);")
+ wait = Poll(
+ session,
+ timeout=5,
+ ignored_exceptions=NoSuchAlertException,
+ message="No user prompt with text 'Hello' detected")
+ wait.until(lambda s: s.alert.text == "Hello")
+
+ response = accept_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
+
+
+def test_accept_in_popup_window(session, inline):
+ orig_handles = session.handles
+
+ session.url = inline("""
+ <button onclick="window.open('about:blank', '_blank', 'width=500; height=200;resizable=yes');">open</button>
+ """)
+ button = session.find.css("button", all=False)
+ button.click()
+
+ session.window_handle = wait_for_new_handle(session, orig_handles)
+ session.url = inline("""
+ <script>window.alert("Hello")</script>
+ """)
+
+ response = accept_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
diff --git a/testing/web-platform/tests/webdriver/tests/add_cookie/__init__.py b/testing/web-platform/tests/webdriver/tests/add_cookie/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/add_cookie/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/add_cookie/add.py b/testing/web-platform/tests/webdriver/tests/add_cookie/add.py
new file mode 100644
index 0000000000..3a19432fc6
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/add_cookie/add.py
@@ -0,0 +1,286 @@
+import pytest
+
+from datetime import datetime, timedelta
+
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import clear_all_cookies
+
+
+def add_cookie(session, cookie):
+ return session.transport.send(
+ "POST", "session/{session_id}/cookie".format(**vars(session)),
+ {"cookie": cookie})
+
+
+def test_null_parameter_value(session, http):
+ path = "/session/{session_id}/cookie".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_null_response_value(session, url):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ }
+
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ response = add_cookie(session, new_cookie)
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ }
+
+ response = add_cookie(session, new_cookie)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ }
+
+ response = add_cookie(session, new_cookie)
+ assert_error(response, "no such window")
+
+
+@pytest.mark.parametrize(
+ "page",
+ [
+ "about:blank",
+ "blob:foo/bar",
+ "data:text/html;charset=utf-8,<p>foo</p>",
+ "file:///foo/bar",
+ "ftp://example.org",
+ "javascript:foo",
+ "ws://example.org",
+ "wss://example.org",
+ ],
+ ids=[
+ "about",
+ "blob",
+ "data",
+ "file",
+ "ftp",
+ "javascript",
+ "websocket",
+ "secure websocket",
+ ],
+)
+def test_cookie_unsupported_scheme(session, page):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ "domain": page,
+ "path": "/",
+ "httpOnly": False,
+ "secure": False
+ }
+
+ result = add_cookie(session, new_cookie)
+ assert_error(result, "invalid cookie domain")
+
+
+def test_add_domain_cookie(session, url, server_config):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ "domain": server_config["browser_host"],
+ "path": "/",
+ "httpOnly": False,
+ "secure": False
+ }
+
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ result = add_cookie(session, new_cookie)
+ assert_success(result)
+
+ cookie = session.cookies("hello")
+ assert "domain" in cookie
+ assert isinstance(cookie["domain"], str)
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+
+ assert cookie["name"] == "hello"
+ assert cookie["value"] == "world"
+ assert cookie["domain"] == server_config["browser_host"] or \
+ cookie["domain"] == ".%s" % server_config["browser_host"]
+
+
+def test_add_cookie_for_ip(session, url, server_config, configuration):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ "domain": "127.0.0.1",
+ "path": "/",
+ "httpOnly": False,
+ "secure": False
+ }
+
+ session.url = "http://127.0.0.1:%s/common/blank.html" % (server_config["ports"]["http"][0])
+ clear_all_cookies(session)
+
+ result = add_cookie(session, new_cookie)
+ assert_success(result)
+
+ cookie = session.cookies("hello")
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+ assert "domain" in cookie
+ assert isinstance(cookie["domain"], str)
+
+ assert cookie["name"] == "hello"
+ assert cookie["value"] == "world"
+ assert cookie["domain"] == "127.0.0.1"
+
+
+def test_add_non_session_cookie(session, url):
+ a_day_from_now = int(
+ (datetime.utcnow() + timedelta(days=1) - datetime.utcfromtimestamp(0)).total_seconds())
+
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ "expiry": a_day_from_now
+ }
+
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ result = add_cookie(session, new_cookie)
+ assert_success(result)
+
+ cookie = session.cookies("hello")
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+ assert "expiry" in cookie
+ assert isinstance(cookie["expiry"], int)
+
+ assert cookie["name"] == "hello"
+ assert cookie["value"] == "world"
+ assert cookie["expiry"] == a_day_from_now
+
+
+def test_add_session_cookie(session, url):
+ new_cookie = {
+ "name": "hello",
+ "value": "world"
+ }
+
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ result = add_cookie(session, new_cookie)
+ assert_success(result)
+
+ cookie = session.cookies("hello")
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+ if "expiry" in cookie:
+ assert cookie.get("expiry") is None
+
+ assert cookie["name"] == "hello"
+ assert cookie["value"] == "world"
+
+
+def test_add_session_cookie_with_leading_dot_character_in_domain(session, url, server_config):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ "domain": ".%s" % server_config["browser_host"]
+ }
+
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ result = add_cookie(session, new_cookie)
+ assert_success(result)
+
+ cookie = session.cookies("hello")
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+ assert "domain" in cookie
+ assert isinstance(cookie["domain"], str)
+
+ assert cookie["name"] == "hello"
+ assert cookie["value"] == "world"
+ assert cookie["domain"] == server_config["browser_host"] or \
+ cookie["domain"] == ".%s" % server_config["browser_host"]
+
+
+@pytest.mark.parametrize("same_site", ["None", "Lax", "Strict"])
+def test_add_cookie_with_valid_samesite_flag(session, url, same_site):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ "sameSite": same_site
+ }
+
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ result = add_cookie(session, new_cookie)
+ assert_success(result)
+
+ cookie = session.cookies("hello")
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+ assert "sameSite" in cookie
+ assert isinstance(cookie["sameSite"], str)
+
+ assert cookie["name"] == "hello"
+ assert cookie["value"] == "world"
+ assert cookie["sameSite"] == same_site
+
+
+def test_add_cookie_with_invalid_samesite_flag(session, url):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ "sameSite": "invalid"
+ }
+
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ response = add_cookie(session, new_cookie)
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("same_site", [False, 12, dict()])
+def test_add_cookie_with_invalid_samesite_type(session, url, same_site):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ "sameSite": same_site
+ }
+
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ response = add_cookie(session, new_cookie)
+ assert_error(response, "invalid argument")
diff --git a/testing/web-platform/tests/webdriver/tests/add_cookie/user_prompts.py b/testing/web-platform/tests/webdriver/tests/add_cookie/user_prompts.py
new file mode 100644
index 0000000000..f58aacd02a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/add_cookie/user_prompts.py
@@ -0,0 +1,137 @@
+# META: timeout=long
+
+import pytest
+
+from webdriver.error import NoSuchCookieException
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def add_cookie(session, cookie):
+ return session.transport.send(
+ "POST", "session/{session_id}/cookie".format(**vars(session)),
+ {"cookie": cookie})
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, url, create_dialog):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ new_cookie = {
+ "name": "foo",
+ "value": "bar",
+ }
+
+ session.url = url("/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = add_cookie(session, new_cookie)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.cookies("foo")
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, url, create_dialog):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ new_cookie = {
+ "name": "foo",
+ "value": "bar",
+ }
+
+ session.url = url("/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = add_cookie(session, new_cookie)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ with pytest.raises(NoSuchCookieException):
+ assert session.cookies("foo")
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, url, create_dialog):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ new_cookie = {
+ "name": "foo",
+ "value": "bar",
+ }
+
+ session.url = url("/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = add_cookie(session, new_cookie)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ with pytest.raises(NoSuchCookieException):
+ assert session.cookies("foo")
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/back/__init__.py b/testing/web-platform/tests/webdriver/tests/back/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/back/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/back/back.py b/testing/web-platform/tests/webdriver/tests/back/back.py
new file mode 100644
index 0000000000..62434323e0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/back/back.py
@@ -0,0 +1,169 @@
+import pytest
+from webdriver import error
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def back(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/back".format(**vars(session)))
+
+
+def test_null_response_value(session, inline):
+ session.url = inline("<div>")
+ session.url = inline("<p>")
+
+ response = back(session)
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = back(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = back(session)
+ assert_success(response)
+
+
+def test_no_browsing_history(session):
+ response = back(session)
+ assert_success(response)
+
+
+def test_basic(session, inline):
+ url = inline("<div id=foo>")
+
+ session.url = url
+ session.url = inline("<div id=bar>")
+ element = session.find.css("#bar", all=False)
+
+ response = back(session)
+ assert_success(response)
+
+ with pytest.raises(error.StaleElementReferenceException):
+ element.property("id")
+
+ assert session.url == url
+ assert session.find.css("#foo", all=False)
+
+
+def test_data_urls(session, inline):
+ test_pages = [
+ inline("<p id=1>"),
+ inline("<p id=2>"),
+ ]
+
+ for page in test_pages:
+ session.url = page
+ assert session.url == test_pages[1]
+
+ response = back(session)
+ assert_success(response)
+ assert session.url == test_pages[0]
+
+
+def test_dismissed_beforeunload(session, inline):
+ url_beforeunload = inline("""
+ <input type="text">
+ <script>
+ window.addEventListener("beforeunload", function (event) {
+ event.preventDefault();
+ });
+ </script>
+ """)
+
+ session.url = inline("<div id=foo>")
+ session.url = url_beforeunload
+
+ element = session.find.css("input", all=False)
+ element.send_keys("bar")
+
+ response = back(session)
+ assert_success(response)
+
+ assert session.url != url_beforeunload
+
+
+def test_fragments(session, url):
+ test_pages = [
+ url("/common/blank.html"),
+ url("/common/blank.html#1234"),
+ url("/common/blank.html#5678"),
+ ]
+
+ for page in test_pages:
+ session.url = page
+ assert session.url == test_pages[2]
+
+ response = back(session)
+ assert_success(response)
+ assert session.url == test_pages[1]
+
+ response = back(session)
+ assert_success(response)
+ assert session.url == test_pages[0]
+
+
+def test_history_pushstate(session, inline):
+ pushstate_page = inline("""
+ <script>
+ function pushState() {
+ history.pushState({foo: "bar"}, "", "#pushstate");
+ }
+ </script>
+ <a onclick="javascript:pushState();">click</a>
+ """)
+
+ session.url = pushstate_page
+ session.find.css("a", all=False).click()
+
+ assert session.url == "{}#pushstate".format(pushstate_page)
+ assert session.execute_script("return history.state;") == {"foo": "bar"}
+
+ response = back(session)
+ assert_success(response)
+
+ assert session.url == pushstate_page
+ assert session.execute_script("return history.state;") is None
+
+
+def test_removed_iframe(session, url, inline):
+ page = inline("<p>foo")
+
+ session.url = page
+ session.url = url("/webdriver/tests/support/html/frames_no_bfcache.html")
+
+ subframe = session.find.css("#sub-frame", all=False)
+ session.switch_frame(subframe)
+
+ response = back(session)
+ assert_success(response)
+
+ assert session.url == page
+
+
+# Capability needed as long as no valid certificate is available:
+# https://github.com/web-platform-tests/wpt/issues/28847
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+def test_cross_origin(session, url):
+ base_path = ("/webdriver/tests/support/html/subframe.html" +
+ "?pipe=header(Cross-Origin-Opener-Policy,same-origin")
+ first_page = url(base_path, protocol="https")
+ second_page = url(base_path, protocol="https", domain="alt")
+
+ session.url = first_page
+ session.url = second_page
+
+ elem = session.find.css("#delete", all=False)
+
+ response = back(session)
+ assert_success(response)
+
+ assert session.url == first_page
+
+ with pytest.raises(error.NoSuchElementException):
+ elem.click()
+ elem = session.find.css("#delete", all=False)
diff --git a/testing/web-platform/tests/webdriver/tests/back/conftest.py b/testing/web-platform/tests/webdriver/tests/back/conftest.py
new file mode 100644
index 0000000000..bd5db0cfeb
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/back/conftest.py
@@ -0,0 +1,19 @@
+import pytest
+
+from webdriver.error import NoSuchWindowException
+
+
+@pytest.fixture(name="session")
+def fixture_session(capabilities, session):
+ """Prevent re-using existent history by running the test in a new window."""
+ original_handle = session.window_handle
+ session.window_handle = session.new_window()
+
+ yield session
+
+ try:
+ session.window.close()
+ except NoSuchWindowException:
+ pass
+
+ session.window_handle = original_handle
diff --git a/testing/web-platform/tests/webdriver/tests/back/user_prompts.py b/testing/web-platform/tests/webdriver/tests/back/user_prompts.py
new file mode 100644
index 0000000000..9d04f0f4ab
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/back/user_prompts.py
@@ -0,0 +1,118 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def back(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/back".format(**vars(session)))
+
+
+@pytest.fixture
+def pages(session, inline):
+ pages = [
+ inline("<p id=1>"),
+ inline("<p id=2>"),
+ ]
+
+ for page in pages:
+ session.url = page
+
+ return pages
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, pages):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = back(session)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.url == pages[0]
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, pages):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = back(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.url == pages[1]
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, pages):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = back(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.url == pages[1]
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type):
+ # retval not testable for confirm and prompt because window is gone
+ check_user_prompt_closed_without_exception(dialog_type, None)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
+ # retval not testable for confirm and prompt because window is gone
+ check_user_prompt_closed_without_exception(dialog_type, None)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/__init__.py
new file mode 100644
index 0000000000..6ac8c3394c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/__init__.py
@@ -0,0 +1,77 @@
+from typing import Any, Callable
+
+from webdriver.bidi.modules.script import ContextTarget
+
+
def recursive_compare(expected: Any, actual: Any) -> None:
    """Recursively assert that *actual* matches *expected*.

    A callable in *expected* is treated as a delegate assertion and is
    invoked with the corresponding actual value.  Dicts in *actual* may
    carry extra keys (forwards-compat design); lists must match element
    by element; anything else is compared for equality.
    """
    if callable(expected):
        expected(actual)
        return

    assert type(expected) == type(actual)

    if type(expected) is list:
        assert len(expected) == len(actual)
        for expected_item, actual_item in zip(expected, actual):
            recursive_compare(expected_item, actual_item)
        return

    if type(expected) is dict:
        # Extra keys in actual are tolerated; missing keys are a failure.
        missing = set(expected.keys()) - set(actual.keys())
        assert not missing, f"Key set should be present: {missing}"
        for key, expected_value in expected.items():
            recursive_compare(expected_value, actual[key])
        return

    assert expected == actual
+
+
def any_bool(actual: Any) -> None:
    """Delegate assertion: *actual* must be a bool."""
    assert isinstance(actual, bool), f"Expected bool, got {type(actual).__name__}"
+
+
def any_dict(actual: Any) -> None:
    """Delegate assertion: *actual* must be a dict."""
    assert isinstance(actual, dict), f"Expected dict, got {type(actual).__name__}"
+
+
def any_int(actual: Any) -> None:
    """Delegate assertion: *actual* must be an int.

    NOTE(review): bool is a subclass of int, so True/False also pass this
    check; use any_bool for strict boolean assertions.
    """
    assert isinstance(actual, int), f"Expected int, got {type(actual).__name__}"
+
+
def any_int_or_null(actual: Any) -> None:
    """Delegate assertion: *actual* must be an int or None."""
    if actual is None:
        return
    # Inlined int check so this helper stands alone.
    assert isinstance(actual, int)
+
+
def any_list(actual: Any) -> None:
    """Delegate assertion: *actual* must be a list."""
    assert isinstance(actual, list), f"Expected list, got {type(actual).__name__}"
+
+
def any_string(actual: Any) -> None:
    """Delegate assertion: *actual* must be a str."""
    assert isinstance(actual, str), f"Expected str, got {type(actual).__name__}"
+
+
def any_string_or_null(actual: Any) -> None:
    """Delegate assertion: *actual* must be a str or None."""
    if actual is None:
        return
    # Inlined str check so this helper stands alone.
    assert isinstance(actual, str)
+
+
def int_interval(start: int, end: int) -> Callable[[Any], None]:
    """Return a delegate asserting an int in the inclusive range [start, end]."""
    def check(actual: Any) -> None:
        # Inlined int check: the value must be an int before comparison.
        assert isinstance(actual, int)
        assert start <= actual <= end, f"{actual} not in [{start}, {end}]"

    return check
+
+
async def create_console_api_message(bidi_session, context, text):
    """Run ``console.log(text)`` inside *context* via script.callFunction.

    :param bidi_session: BiDiSession used to issue the command.
    :param context: Browsing context info dict (its "context" id is used).
    :param text: String to log; returned unchanged for caller convenience.
    """
    await bidi_session.script.call_function(
        function_declaration="""(text) => console.log(text)""",
        arguments=[{"type": "string", "value": text}],
        await_promise=False,
        target=ContextTarget(context["context"]),
    )
    return text
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/__init__.py
new file mode 100644
index 0000000000..a887aeb8a4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/__init__.py
@@ -0,0 +1,65 @@
+from .. import (
+ any_int,
+ any_string,
+ any_string_or_null,
+ recursive_compare,
+)
+
def assert_browsing_context(
    info, context, children=None, is_root=True, parent=None, url=None
):
    """Assert a BrowsingContextInfo dict has the expected shape and values.

    :param info: BrowsingContextInfo payload to check.
    :param context: Expected context id, or None to skip the exact-id check.
        Note: Only the tests for browsingContext.getTree should pass None,
        because it's not possible to assert the exact browsing context id
        for frames.
    :param children: Expected number of children, or None when the children
        list itself is expected to be None.
    :param is_root: Whether *info* describes a root (top-level tree) entry.
    :param parent: Expected parent context id, or None.
    :param url: Expected URL string.
    """
    assert "children" in info
    if children is None:
        assert info["children"] is None
    else:
        assert isinstance(info["children"], list)
        assert len(info["children"]) == children

    assert "context" in info
    assert isinstance(info["context"], str)
    if context is not None:
        assert info["context"] == context

    if not is_root:
        # Non-root browsing context entries do not contain a parent.
        assert "parent" not in info
        assert parent is None
    elif parent is None:
        # A top-level browsing context has no parent.
        assert info["parent"] is None
    else:
        assert "parent" in info
        assert isinstance(info["parent"], str)
        assert info["parent"] == parent

    assert "url" in info
    assert isinstance(info["url"], str)
    assert info["url"] == url
+
+
def assert_navigation_info(event, expected_navigation_info):
    """Assert a NavigationInfo event payload.

    First checks the general shape (context/url strings, int timestamp,
    navigation string-or-null), then any of the keys present in
    *expected_navigation_info*: context/navigation/url are compared for
    equality, timestamp is a delegate assertion (e.g. int_interval).
    """
    recursive_compare(
        {
            "context": any_string,
            "navigation": any_string_or_null,
            "timestamp": any_int,
            "url": any_string,
        },
        event,
    )

    for key in ("context", "navigation"):
        if key in expected_navigation_info:
            assert event[key] == expected_navigation_info[key]

    if "timestamp" in expected_navigation_info:
        # The expectation for timestamp is a callable delegate.
        expected_navigation_info["timestamp"](event["timestamp"])

    if "url" in expected_navigation_info:
        assert event["url"] == expected_navigation_info["url"]
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/__init__.py
new file mode 100644
index 0000000000..c15b76baf4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/__init__.py
@@ -0,0 +1,94 @@
+from base64 import encodebytes
+
+from tests.support.image import png_dimensions
+from webdriver.bidi.modules.script import ContextTarget
+
+
async def viewport_dimensions(bidi_session, context):
    """Get the dimensions of the viewport containing context.

    :param bidi_session: BiDiSession
    :param context: Browsing context ID
    :returns: Tuple of (int, int) containing viewport width, viewport height.
    """
    reply = await bidi_session.script.call_function(
        function_declaration="""() => {
            const {devicePixelRatio, innerHeight, innerWidth} = window;

            return [
                Math.floor(innerWidth * devicePixelRatio),
                Math.floor(innerHeight * devicePixelRatio)
            ];
        }""",
        target=ContextTarget(context["context"]),
        await_promise=False)
    # The remote value is a list of two number payloads: [width, height].
    dimensions = [entry["value"] for entry in reply["value"]]
    return tuple(dimensions)
+
+
class ImageDifference:
    """Summary of the pixel-level differences between two images.

    :param total_pixels: The total number of pixel differences between the images
    :param max_difference: The maximum difference between any corresponding color
        channels across all pixels of the image.
    """

    def __init__(self, total_pixels, max_difference):
        self.total_pixels = total_pixels
        self.max_difference = max_difference

    def equal(self):
        """Return True when the compared images had no differing pixels."""
        return self.total_pixels == 0


async def compare_png_data(bidi_session,
                           url,
                           img1: bytes,
                           img2: bytes) -> ImageDifference:
    """Calculate difference statistics between two PNG images.

    :param bidi_session: BidiSession
    :param url: fixture to construct a URL string given a path
    :param img1: Bytes of first PNG image
    :param img2: Bytes of second PNG image
    :returns: ImageDifference representing the total number of different pixels,
              and maximum per-channel difference between the images.
    """
    if img1 == img2:
        # Identical bytes render identically; skip the browser round-trip.
        return ImageDifference(0, 0)

    width, height = png_dimensions(img1)
    assert (width, height) == png_dimensions(img2)

    context = await bidi_session.browsing_context.create(type_hint="tab")
    try:
        await bidi_session.browsing_context.navigate(
            context=context["context"],
            url=url("/webdriver/tests/support/html/render.html"),
            wait="complete")
        result = await bidi_session.script.call_function(
            function_declaration="""(img1, img2, width, height) => {
            return compare(img1, img2, width, height)
        }""",
            target=ContextTarget(context["context"]),
            arguments=[{"type": "string",
                        "value": encodebytes(img1).decode()},
                       {"type": "string",
                        "value": encodebytes(img2).decode()},
                       {"type": "number",
                        "value": width},
                       {"type": "number",
                        "value": height}],
            await_promise=True)
    finally:
        # Always clean up the helper tab, even when navigation, the script
        # call, or a later assertion fails.
        await bidi_session.browsing_context.close(context=context["context"])

    assert result["type"] == "object"
    # Unpack the remote {totalPixels, maxDifference} object, validating shape.
    # (Fixed f-string below: the original used a JS-style "${...}" placeholder
    # which printed a literal "$".)
    diffs = {}
    for entry in result["value"]:
        assert len(entry) == 2
        key, value = entry
        assert value["type"] == "number"
        assert key in ("totalPixels", "maxDifference"), f"Unexpected object key {key}"
        diffs[key] = value["value"]
    assert set(diffs) == {"totalPixels", "maxDifference"}
    return ImageDifference(diffs["totalPixels"], diffs["maxDifference"])
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/capture_screenshot.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/capture_screenshot.py
new file mode 100644
index 0000000000..be853ddc42
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/capture_screenshot.py
@@ -0,0 +1,42 @@
+import pytest
+from tests.support.image import png_dimensions
+
+from . import compare_png_data, viewport_dimensions
+
+
@pytest.mark.asyncio
async def test_capture(bidi_session, url, top_context, inline):
    """Screenshots match the viewport size and reflect content changes:
    a blank page differs from rendered content, while re-rendering the
    same content yields an identical image."""
    expected_size = await viewport_dimensions(bidi_session, top_context)

    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url="about:blank", wait="complete"
    )
    reference_data = await bidi_session.browsing_context.capture_screenshot(
        context=top_context["context"])
    assert png_dimensions(reference_data) == expected_size

    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=inline("<div>foo</div>"), wait="complete"
    )
    data = await bidi_session.browsing_context.capture_screenshot(
        context=top_context["context"])

    comparison = await compare_png_data(bidi_session,
                                        url,
                                        reference_data,
                                        data)
    assert not comparison.equal()

    # Take a second screenshot that should be identical to validate that
    # we don't just always return false here
    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=inline("<div>foo</div>"), wait="complete"
    )
    new_data = await bidi_session.browsing_context.capture_screenshot(
        context=top_context["context"])

    comparison = await compare_png_data(bidi_session,
                                        url,
                                        data,
                                        new_data)
    assert comparison.equal()
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/frame.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/frame.py
new file mode 100644
index 0000000000..6fb9d49cf2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/frame.py
@@ -0,0 +1,61 @@
+import pytest
+from tests.support.image import png_dimensions
+from tests.support.screenshot import (
+ DEFAULT_CONTENT,
+ INNER_IFRAME_STYLE,
+ OUTER_IFRAME_STYLE,
+ REFERENCE_CONTENT,
+ REFERENCE_STYLE,
+)
+
+from . import compare_png_data, viewport_dimensions
+
+
@pytest.mark.asyncio
async def test_iframe(bidi_session, top_context, inline, iframe):
    """Capturing a child frame context yields an image strictly smaller
    than the full-viewport screenshot of the embedding page."""
    viewport_size = await viewport_dimensions(bidi_session, top_context)

    iframe_content = f"{INNER_IFRAME_STYLE}{DEFAULT_CONTENT}"
    url = inline(f"{OUTER_IFRAME_STYLE}{iframe(iframe_content)}")
    await bidi_session.browsing_context.navigate(context=top_context["context"],
                                                 url=url,
                                                 wait="complete")
    reference_data = await bidi_session.browsing_context.capture_screenshot(
        context=top_context["context"])
    assert png_dimensions(reference_data) == viewport_size

    all_contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"])
    frame_context = all_contexts[0]["children"][0]

    data = await bidi_session.browsing_context.capture_screenshot(context=frame_context["context"])

    assert png_dimensions(data) < png_dimensions(reference_data)
+
+
@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"])
@pytest.mark.asyncio
async def test_context_origin(bidi_session, url, top_context, inline, iframe, domain):
    """A page embedding an iframe (same- or cross-origin) renders the same
    as the flat reference page with the iframe's content inlined."""
    expected_size = await viewport_dimensions(bidi_session, top_context)

    initial_url = inline(f"{REFERENCE_STYLE}{REFERENCE_CONTENT}")
    await bidi_session.browsing_context.navigate(context=top_context["context"],
                                                 url=initial_url,
                                                 wait="complete")

    reference_data = await bidi_session.browsing_context.capture_screenshot(
        context=top_context["context"])
    assert png_dimensions(reference_data) == expected_size

    iframe_content = f"{INNER_IFRAME_STYLE}{DEFAULT_CONTENT}"
    new_url = inline(f"{OUTER_IFRAME_STYLE}{iframe(iframe_content, domain=domain)}")
    await bidi_session.browsing_context.navigate(context=top_context["context"],
                                                 url=new_url,
                                                 wait="complete")

    data = await bidi_session.browsing_context.capture_screenshot(context=top_context["context"])
    comparison = await compare_png_data(bidi_session,
                                        url,
                                        reference_data,
                                        data)

    assert comparison.equal()
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/invalid.py
new file mode 100644
index 0000000000..e30a0d3c99
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/capture_screenshot/invalid.py
@@ -0,0 +1,26 @@
+import pytest
+import webdriver.bidi.error as error
+
+
@pytest.mark.parametrize("value", [None, False, 42, {}, []])
@pytest.mark.asyncio
async def test_params_context_invalid_type(bidi_session, value):
    """Non-string context values are rejected with invalid argument."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.browsing_context.capture_screenshot(context=value)
+
+
@pytest.mark.asyncio
async def test_invalid_frame(bidi_session):
    """An unknown context id raises no such frame.

    Dropped the unused top_context/inline fixture parameters: they were
    never referenced and only forced needless fixture setup.
    """
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.browsing_context.capture_screenshot(context="_invalid_")
+
+
@pytest.mark.asyncio
async def test_closed_frame(bidi_session, top_context, inline, add_and_remove_iframe):
    """A context id of an already-removed iframe raises no such frame."""
    url = inline("<div>foo</div>")
    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=url, wait="complete"
    )
    frame_id = await add_and_remove_iframe(top_context)
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.browsing_context.capture_screenshot(context=frame_id)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/window_handle.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/window_handle.py
new file mode 100644
index 0000000000..4f36fba197
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/classic_interop/window_handle.py
@@ -0,0 +1,7 @@
+import pytest
+
+pytestmark = pytest.mark.asyncio
+
+
async def test_top_level_context_id_equals_window_handle(top_context, current_session):
    """A BiDi top-level context id equals the classic session's window handle."""
    assert top_context["context"] == current_session.window_handle
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/close.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/close.py
new file mode 100644
index 0000000000..21bf7411e5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/close.py
@@ -0,0 +1,23 @@
+import pytest
+
+pytestmark = pytest.mark.asyncio
+
+
@pytest.mark.parametrize("type_hint", ["window", "tab"])
async def test_top_level_context(bidi_session, type_hint):
    """Closing a newly created top-level context removes only it from the tree."""
    top_level_context = await bidi_session.browsing_context.create(
        type_hint=type_hint
    )

    contexts = await bidi_session.browsing_context.get_tree()
    assert len(contexts) == 2

    await bidi_session.browsing_context.close(context=top_level_context["context"])

    contexts = await bidi_session.browsing_context.get_tree()
    assert len(contexts) == 1

    assert contexts[0]["context"] != top_level_context["context"]

    # TODO: Add a test for closing the last tab once the behavior has been specified
    # https://github.com/w3c/webdriver-bidi/issues/187
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/invalid.py
new file mode 100644
index 0000000000..7c73a83b13
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/close/invalid.py
@@ -0,0 +1,31 @@
+import pytest
+import webdriver.bidi.error as error
+
+pytestmark = pytest.mark.asyncio
+
+
@pytest.mark.parametrize("value", [None, False, 42, {}, []])
async def test_params_context_invalid_type(bidi_session, value):
    """Non-string context values are rejected with invalid argument."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.browsing_context.close(context=value)
+
+
async def test_params_context_invalid_value(bidi_session):
    """An unknown context id raises no such frame."""
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.browsing_context.close(context="foo")
+
+
async def test_child_context(bidi_session, test_page_same_origin_frame, top_context):
    """Closing a child (iframe) context is rejected with invalid argument —
    only top-level contexts can be closed."""
    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=test_page_same_origin_frame, wait="complete"
    )

    all_contexts = await bidi_session.browsing_context.get_tree()

    assert len(all_contexts) == 1
    parent_info = all_contexts[0]
    assert len(parent_info["children"]) == 1
    child_info = parent_info["children"][0]

    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.browsing_context.close(context=child_info["context"])
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/context_created.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/context_created.py
new file mode 100644
index 0000000000..93be00bbb6
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/context_created/context_created.py
@@ -0,0 +1,225 @@
+import asyncio
+
+import pytest
+from tests.support.sync import AsyncPoll
+from webdriver.bidi.modules.script import ContextTarget
+from webdriver.error import TimeoutException
+
+from .. import assert_browsing_context
+
+pytestmark = pytest.mark.asyncio
+
+CONTEXT_CREATED_EVENT = "browsingContext.contextCreated"
+
+
async def test_not_unsubscribed(bidi_session):
    """After subscribe + unsubscribe, no contextCreated events are delivered."""
    await bidi_session.session.subscribe(events=[CONTEXT_CREATED_EVENT])
    await bidi_session.session.unsubscribe(events=[CONTEXT_CREATED_EVENT])

    # Track all received browsingContext.contextCreated events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event)

    await bidi_session.browsing_context.create(type_hint="tab")

    wait = AsyncPoll(bidi_session, timeout=0.5)
    with pytest.raises(TimeoutException):
        await wait.until(lambda _: len(events) > 0)

    remove_listener()
+
+
@pytest.mark.parametrize("type_hint", ["tab", "window"])
async def test_new_context(bidi_session, wait_for_event, subscribe_events, type_hint):
    """contextCreated is emitted for a new top-level context of either type."""
    await subscribe_events([CONTEXT_CREATED_EVENT])

    on_entry = wait_for_event(CONTEXT_CREATED_EVENT)
    # Bug fix: use the parametrized type_hint instead of a hard-coded "tab",
    # otherwise the "window" variant never actually created a window.
    top_level_context = await bidi_session.browsing_context.create(type_hint=type_hint)
    context_info = await on_entry

    assert_browsing_context(
        context_info,
        top_level_context["context"],
        children=None,
        url="about:blank",
        parent=None,
    )
+
+
async def test_evaluate_window_open_without_url(bidi_session, subscribe_events, wait_for_event, top_context):
    """window.open() without a URL emits contextCreated for a blank context."""
    await subscribe_events([CONTEXT_CREATED_EVENT])

    on_entry = wait_for_event(CONTEXT_CREATED_EVENT)

    await bidi_session.script.evaluate(
        expression="""window.open();""",
        target=ContextTarget(top_context["context"]),
        await_promise=False)

    context_info = await on_entry

    assert_browsing_context(
        context_info,
        context=None,
        children=None,
        url="about:blank",
        parent=None,
    )
+
+
async def test_evaluate_window_open_with_url(bidi_session, subscribe_events, wait_for_event, inline, top_context):
    """window.open(url) emits contextCreated reporting the initial
    "about:blank" document, not the target URL."""
    url = inline("<div>foo</div>")

    await subscribe_events([CONTEXT_CREATED_EVENT])

    on_entry = wait_for_event(CONTEXT_CREATED_EVENT)

    await bidi_session.script.evaluate(
        expression=f"""window.open("{url}");""",
        target=ContextTarget(top_context["context"]),
        await_promise=False)
    context_info = await on_entry

    # contextCreated reports "about:blank" even though window.open was
    # given a target URL.
    assert_browsing_context(
        context_info,
        context=None,
        children=None,
        url="about:blank",
        parent=None,
    )
+
+
async def test_navigate_creates_iframes(bidi_session, subscribe_events, top_context, test_page_multiple_frames):
    """Navigating to a page with two iframes emits one contextCreated event
    per frame, each parented to the top-level context."""
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event)
    await subscribe_events([CONTEXT_CREATED_EVENT])

    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=test_page_multiple_frames, wait="complete"
    )

    wait = AsyncPoll(
        bidi_session, message="Didn't receive context created events for frames"
    )
    await wait.until(lambda _: len(events) >= 2)
    assert len(events) == 2

    # Get all browsing contexts from the first tab
    contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"])

    assert len(contexts) == 1
    root_info = contexts[0]
    children_info = root_info["children"]
    assert len(children_info) == 2

    # Note: Live `browsingContext.contextCreated` events are always created with "about:blank":
    # https://github.com/w3c/webdriver-bidi/issues/220#issuecomment-1145785349
    assert_browsing_context(
        events[0],
        children_info[0]["context"],
        children=None,
        url="about:blank",
        parent=root_info["context"],
    )

    assert_browsing_context(
        events[1],
        children_info[1]["context"],
        children=None,
        url="about:blank",
        parent=root_info["context"],
    )

    remove_listener()
+
+
async def test_navigate_creates_nested_iframes(bidi_session, subscribe_events, top_context, test_page_nested_frames):
    """Navigating to a page with a nested iframe emits contextCreated for
    each level, with the inner frame parented to the outer one."""
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event)
    await subscribe_events([CONTEXT_CREATED_EVENT])

    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=test_page_nested_frames, wait="complete"
    )

    wait = AsyncPoll(
        bidi_session, message="Didn't receive context created events for frames"
    )
    await wait.until(lambda _: len(events) >= 2)
    assert len(events) == 2

    # Get all browsing contexts from the first tab
    contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"])

    assert len(contexts) == 1
    root_info = contexts[0]
    assert len(root_info["children"]) == 1
    child1_info = root_info["children"][0]
    assert len(child1_info["children"]) == 1
    child2_info = child1_info["children"][0]

    # Note: `browsingContext.contextCreated` is always created with "about:blank":
    # https://github.com/w3c/webdriver-bidi/issues/220#issuecomment-1145785349
    assert_browsing_context(
        events[0],
        child1_info["context"],
        children=None,
        url="about:blank",
        parent=root_info["context"],
    )

    assert_browsing_context(
        events[1],
        child2_info["context"],
        children=None,
        url="about:blank",
        parent=child1_info["context"],
    )

    remove_listener()
+
+
async def test_subscribe_to_one_context(
    bidi_session, subscribe_events, top_context, test_page_same_origin_frame
):
    """A context-scoped subscription only fires for frames created within
    that context, not for unrelated new tabs."""
    # Subscribe to a specific context
    await subscribe_events(
        events=[CONTEXT_CREATED_EVENT], contexts=[top_context["context"]]
    )

    # Track all received browsingContext.contextCreated events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener(CONTEXT_CREATED_EVENT, on_event)

    await bidi_session.browsing_context.create(type_hint="tab")

    # Make sure we didn't receive the event for the new tab
    wait = AsyncPoll(bidi_session, timeout=0.5)
    with pytest.raises(TimeoutException):
        await wait.until(lambda _: len(events) > 0)

    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=test_page_same_origin_frame, wait="complete"
    )

    # Make sure we received the event for the iframe
    await wait.until(lambda _: len(events) >= 1)
    assert len(events) == 1

    remove_listener()
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/invalid.py
new file mode 100644
index 0000000000..2d60e08476
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/invalid.py
@@ -0,0 +1,53 @@
+import pytest
+import webdriver.bidi.error as error
+
+pytestmark = pytest.mark.asyncio
+
+
@pytest.mark.parametrize("value", [False, 42, {}, []])
async def test_params_reference_context_invalid_type(bidi_session, value):
    """Non-string reference_context values raise invalid argument."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.browsing_context.create(
            type_hint="tab", reference_context=value
        )
+
+
async def test_params_reference_context_invalid_value(bidi_session):
    """An unknown reference_context id raises no such frame."""
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.browsing_context.create(
            type_hint="tab", reference_context="foo"
        )
+
+
async def test_params_reference_context_non_top_level(
    bidi_session, test_page_same_origin_frame, top_context
):
    """A child frame is not a valid reference_context — invalid argument."""
    await bidi_session.browsing_context.navigate(
        context=top_context["context"],
        url=test_page_same_origin_frame,
        wait="complete",
    )

    all_contexts = await bidi_session.browsing_context.get_tree()

    assert len(all_contexts) == 1
    parent_info = all_contexts[0]
    assert len(parent_info["children"]) == 1
    child_info = parent_info["children"][0]

    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.browsing_context.create(
            type_hint="tab", reference_context=child_info["context"]
        )
+
+
@pytest.mark.parametrize("value", [None, False, 42, {}, []])
async def test_params_type_invalid_type(bidi_session, value):
    """Non-string type values raise invalid argument."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.browsing_context.create(type_hint=value)
+
+
@pytest.mark.parametrize("value", ["", "foo"])
async def test_params_type_invalid_value(bidi_session, value):
    """Strings other than "tab"/"window" raise invalid argument."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.browsing_context.create(type_hint=value)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/reference_context.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/reference_context.py
new file mode 100644
index 0000000000..f8a834069a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/reference_context.py
@@ -0,0 +1,46 @@
+import pytest
+
+from .. import assert_browsing_context
+from webdriver.bidi.modules.script import ContextTarget
+
+pytestmark = pytest.mark.asyncio
+
+
@pytest.mark.parametrize("value", ["tab", "window"])
async def test_reference_context(bidi_session, value):
    """A new context can be created relative to an existing reference context."""
    contexts = await bidi_session.browsing_context.get_tree(max_depth=0)
    assert len(contexts) == 1

    reference_context = await bidi_session.browsing_context.create(type_hint="tab")
    contexts = await bidi_session.browsing_context.get_tree(max_depth=0)
    assert len(contexts) == 2

    new_context = await bidi_session.browsing_context.create(
        reference_context=reference_context["context"], type_hint=value
    )
    # Fixed copy-paste duplication: compare the new context against *both*
    # pre-existing contexts, not contexts[0] twice.
    assert contexts[0]["context"] != new_context["context"]
    assert contexts[1]["context"] != new_context["context"]

    contexts = await bidi_session.browsing_context.get_tree(max_depth=0)
    assert len(contexts) == 3

    # Retrieve the new context info
    contexts = await bidi_session.browsing_context.get_tree(
        max_depth=0, root=new_context["context"]
    )

    assert_browsing_context(
        contexts[0],
        new_context["context"],
        children=None,
        is_root=True,
        parent=None,
        url="about:blank",
    )

    # We can not assert the specific behavior of reference_context here,
    # so we only verify that a new browsing context was successfully created
    # when a valid reference_context is provided.

    await bidi_session.browsing_context.close(context=reference_context["context"])
    await bidi_session.browsing_context.close(context=new_context["context"])
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/type.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/type.py
new file mode 100644
index 0000000000..55ce7b4428
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/create/type.py
@@ -0,0 +1,41 @@
+import pytest
+
+from .. import assert_browsing_context
+from webdriver.bidi.modules.script import ContextTarget
+
+pytestmark = pytest.mark.asyncio
+
+
@pytest.mark.parametrize("value", ["tab", "window"])
async def test_type(bidi_session, value):
    """Creating a tab or window yields a blank top-level context whose
    window.opener is unset."""
    contexts = await bidi_session.browsing_context.get_tree(max_depth=0)
    assert len(contexts) == 1

    new_context = await bidi_session.browsing_context.create(type_hint=value)
    assert contexts[0]["context"] != new_context["context"]

    # Check there is an additional browsing context
    contexts = await bidi_session.browsing_context.get_tree(max_depth=0)
    assert len(contexts) == 2

    # Retrieve the new context info
    contexts = await bidi_session.browsing_context.get_tree(
        max_depth=0, root=new_context["context"]
    )

    assert_browsing_context(
        contexts[0],
        new_context["context"],
        children=None,
        is_root=True,
        parent=None,
        url="about:blank",
    )

    opener_protocol_value = await bidi_session.script.evaluate(
        expression="!!window.opener",
        target=ContextTarget(new_context["context"]),
        await_promise=False)
    assert opener_protocol_value["value"] is False

    await bidi_session.browsing_context.close(context=new_context["context"])
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/dom_content_loaded.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/dom_content_loaded.py
new file mode 100644
index 0000000000..fe28005ae0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/dom_content_loaded/dom_content_loaded.py
@@ -0,0 +1,121 @@
+import pytest
+from tests.support.sync import AsyncPoll
+
+from ... import int_interval
+from .. import assert_navigation_info
+
+pytestmark = pytest.mark.asyncio
+
+DOM_CONTENT_LOADED_EVENT = "browsingContext.domContentLoaded"
+
+
+async def test_unsubscribe(bidi_session, inline, top_context):
+ await bidi_session.session.subscribe(events=[DOM_CONTENT_LOADED_EVENT])
+ await bidi_session.session.unsubscribe(events=[DOM_CONTENT_LOADED_EVENT])
+
+ # Track all received browsingContext.domContentLoaded events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener(
+ DOM_CONTENT_LOADED_EVENT, on_event
+ )
+
+ url = inline("<div>foo</div>")
+
+ # When navigation reaches complete state,
+ # we should have received a browsingContext.domContentLoaded event
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=url, wait="complete"
+ )
+
+ assert len(events) == 0
+
+ remove_listener()
+
+
+async def test_subscribe(bidi_session, subscribe_events, inline, new_tab, wait_for_event):
+ await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT])
+
+ on_entry = wait_for_event(DOM_CONTENT_LOADED_EVENT)
+ url = inline("<div>foo</div>")
+ await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url)
+ event = await on_entry
+
+ assert_navigation_info(event, {"context": new_tab["context"], "url": url})
+
+
+async def test_timestamp(bidi_session, current_time, subscribe_events, inline, new_tab, wait_for_event):
+ await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT])
+
+ time_start = await current_time()
+
+ on_entry = wait_for_event(DOM_CONTENT_LOADED_EVENT)
+ url = inline("<div>foo</div>")
+ await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url)
+ event = await on_entry
+
+ time_end = await current_time()
+
+ assert_navigation_info(
+ event,
+ {"context": new_tab["context"], "timestamp": int_interval(time_start, time_end)}
+ )
+
+
+async def test_iframe(bidi_session, subscribe_events, new_tab, test_page, test_page_same_origin_frame):
+ events = []
+
+ async def on_event(method, data):
+ # Filter out events for about:blank to avoid browser differences
+ if data["url"] != 'about:blank':
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener(
+ DOM_CONTENT_LOADED_EVENT, on_event
+ )
+ await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT])
+
+ await bidi_session.browsing_context.navigate(
+ context=new_tab["context"], url=test_page_same_origin_frame
+ )
+
+ wait = AsyncPoll(
+ bidi_session, message="Didn't receive dom content loaded events for frames"
+ )
+ await wait.until(lambda _: len(events) >= 2)
+ assert len(events) == 2
+
+ contexts = await bidi_session.browsing_context.get_tree(root=new_tab["context"])
+
+ assert len(contexts) == 1
+ root_info = contexts[0]
+ assert len(root_info["children"]) == 1
+ child_info = root_info["children"][0]
+
+ # The ordering of the domContentLoaded event is not guaranteed between the
+ # root page and the iframe, find the appropriate events in the current list.
+ first_is_root = events[0]["context"] == root_info["context"]
+ root_event = events[0] if first_is_root else events[1]
+ child_event = events[1] if first_is_root else events[0]
+
+ assert_navigation_info(
+ root_event,
+ {"context": root_info["context"], "url": test_page_same_origin_frame}
+ )
+ assert_navigation_info(child_event, {"context": child_info["context"], "url": test_page})
+
+ remove_listener()
+
+
+@pytest.mark.parametrize("type_hint", ["tab", "window"])
+async def test_new_context(bidi_session, subscribe_events, wait_for_event, type_hint):
+ await subscribe_events(events=[DOM_CONTENT_LOADED_EVENT])
+
+ on_entry = wait_for_event(DOM_CONTENT_LOADED_EVENT)
+ new_context = await bidi_session.browsing_context.create(type_hint=type_hint)
+ event = await on_entry
+
+ assert_navigation_info(event, {"context": new_context["context"], "url": "about:blank"})
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/frames.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/frames.py
new file mode 100644
index 0000000000..b1936d31d0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/frames.py
@@ -0,0 +1,90 @@
+import pytest
+
+from .. import assert_browsing_context
+
+pytestmark = pytest.mark.asyncio
+
+
+async def test_multiple_frames(
+ bidi_session,
+ top_context,
+ test_page,
+ test_page2,
+ test_page_multiple_frames,
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page_multiple_frames, wait="complete"
+ )
+
+ # First retrieve all browsing contexts of the first tab
+ top_level_context_id = top_context["context"]
+ all_contexts = await bidi_session.browsing_context.get_tree(root=top_level_context_id)
+
+ assert len(all_contexts) == 1
+ root_info = all_contexts[0]
+ assert_browsing_context(
+ root_info,
+ top_level_context_id,
+ children=2,
+ parent=None,
+ url=test_page_multiple_frames,
+ )
+
+ child1_info = root_info["children"][0]
+ assert_browsing_context(
+ child1_info,
+ context=None,
+ children=0,
+ is_root=False,
+ parent=None,
+ url=test_page,
+ )
+ assert child1_info["context"] != root_info["context"]
+
+ child2_info = root_info["children"][1]
+ assert_browsing_context(
+ child2_info,
+ context=None,
+ children=0,
+ is_root=False,
+ parent=None,
+ url=test_page2,
+ )
+ assert child2_info["context"] != root_info["context"]
+ assert child2_info["context"] != child1_info["context"]
+
+
+async def test_cross_origin(
+ bidi_session,
+ top_context,
+ test_page_cross_origin,
+ test_page_cross_origin_frame,
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page_cross_origin_frame, wait="complete"
+ )
+
+ # First retrieve all browsing contexts of the first tab
+ top_level_context_id = top_context["context"]
+ all_contexts = await bidi_session.browsing_context.get_tree(root=top_level_context_id)
+
+ assert len(all_contexts) == 1
+ root_info = all_contexts[0]
+ assert_browsing_context(
+ root_info,
+ top_level_context_id,
+ children=1,
+ parent=None,
+ url=test_page_cross_origin_frame,
+ )
+
+ child1_info = root_info["children"][0]
+ assert_browsing_context(
+ child1_info,
+ context=None,
+ children=0,
+ is_root=False,
+ parent=None,
+ url=test_page_cross_origin,
+ )
+ assert child1_info["context"] != root_info["context"]
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/invalid.py
new file mode 100644
index 0000000000..dbc93155e9
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/invalid.py
@@ -0,0 +1,27 @@
+import pytest
+import webdriver.bidi.error as error
+
+pytestmark = pytest.mark.asyncio
+
+
+@pytest.mark.parametrize("value", [False, "foo", {}, []])
+async def test_params_max_depth_invalid_type(bidi_session, value):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.browsing_context.get_tree(max_depth=value)
+
+
+@pytest.mark.parametrize("value", [-1, 1.1, 2**53])
+async def test_params_max_depth_invalid_value(bidi_session, value):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.browsing_context.get_tree(max_depth=value)
+
+
+@pytest.mark.parametrize("value", [False, 42, {}, []])
+async def test_params_root_invalid_type(bidi_session, value):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.browsing_context.get_tree(root=value)
+
+
+async def test_params_root_invalid_value(bidi_session):
+ with pytest.raises(error.NoSuchFrameException):
+ await bidi_session.browsing_context.get_tree(root="foo")
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/max_depth.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/max_depth.py
new file mode 100644
index 0000000000..ca1d0edfa1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/max_depth.py
@@ -0,0 +1,121 @@
+import pytest
+
+from .. import assert_browsing_context
+
+pytestmark = pytest.mark.asyncio
+
+
+@pytest.mark.parametrize("value", [0, 2**53 - 1])
+async def test_params_boundaries(bidi_session, value):
+ await bidi_session.browsing_context.get_tree(max_depth=value)
+
+
+async def test_null(
+ bidi_session,
+ top_context,
+ test_page,
+ test_page_same_origin_frame,
+ test_page_nested_frames,
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page_nested_frames, wait="complete"
+ )
+
+ # Retrieve browsing contexts for first tab only
+ top_level_context_id = top_context["context"]
+ contexts = await bidi_session.browsing_context.get_tree(root=top_level_context_id)
+
+ assert len(contexts) == 1
+ root_info = contexts[0]
+ assert_browsing_context(
+ root_info,
+ top_level_context_id,
+ children=1,
+ parent=None,
+ url=test_page_nested_frames,
+ )
+
+ child1_info = root_info["children"][0]
+ assert_browsing_context(
+ child1_info,
+ context=None,
+ children=1,
+ is_root=False,
+ parent=None,
+ url=test_page_same_origin_frame,
+ )
+ assert child1_info["context"] != root_info["context"]
+
+ child2_info = child1_info["children"][0]
+ assert_browsing_context(
+ child2_info,
+ context=None,
+ children=0,
+ is_root=False,
+ parent=None,
+ url=test_page,
+ )
+ assert child2_info["context"] != root_info["context"]
+ assert child2_info["context"] != child1_info["context"]
+
+
+async def test_top_level_only(bidi_session, top_context, test_page_nested_frames):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page_nested_frames, wait="complete"
+ )
+
+ # Retrieve browsing contexts for first tab only
+ top_level_context_id = top_context["context"]
+ contexts = await bidi_session.browsing_context.get_tree(
+ max_depth=0,
+ root=top_level_context_id
+ )
+
+ assert len(contexts) == 1
+ root_info = contexts[0]
+ assert_browsing_context(
+ root_info,
+ top_level_context_id,
+ children=None,
+ parent=None,
+ url=test_page_nested_frames,
+ )
+
+
+async def test_top_level_and_one_child(
+ bidi_session,
+ top_context,
+ test_page_nested_frames,
+ test_page_same_origin_frame,
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page_nested_frames, wait="complete"
+ )
+
+ # Retrieve browsing contexts for first tab only
+ top_level_context_id = top_context["context"]
+ contexts = await bidi_session.browsing_context.get_tree(
+ max_depth=1,
+ root=top_level_context_id
+ )
+
+ assert len(contexts) == 1
+ root_info = contexts[0]
+ assert_browsing_context(
+ root_info,
+ top_level_context_id,
+ children=1,
+ parent=None,
+ url=test_page_nested_frames,
+ )
+
+ child1_info = root_info["children"][0]
+ assert_browsing_context(
+ child1_info,
+ context=None,
+ children=None,
+ is_root=False,
+ parent=None,
+ url=test_page_same_origin_frame,
+ )
+ assert child1_info["context"] != root_info["context"]
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/root.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/root.py
new file mode 100644
index 0000000000..74d11c6003
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/get_tree/root.py
@@ -0,0 +1,113 @@
+import pytest
+
+from .. import assert_browsing_context
+
+pytestmark = pytest.mark.asyncio
+
+
+@pytest.mark.parametrize("type_hint", ["tab", "window"])
+async def test_null(bidi_session, top_context, test_page, type_hint):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page, wait="complete"
+ )
+
+ current_top_level_context_id = top_context["context"]
+ other_top_level_context = await bidi_session.browsing_context.create(type_hint=type_hint)
+ other_top_level_context_id = other_top_level_context["context"]
+
+ # Retrieve all top-level browsing contexts
+ contexts = await bidi_session.browsing_context.get_tree(root=None)
+
+ assert len(contexts) == 2
+ if contexts[0]["context"] == current_top_level_context_id:
+ current_info = contexts[0]
+ other_info = contexts[1]
+ else:
+ current_info = contexts[1]
+ other_info = contexts[0]
+
+ assert_browsing_context(
+ current_info,
+ current_top_level_context_id,
+ children=0,
+ parent=None,
+ url=test_page,
+ )
+
+ assert_browsing_context(
+ other_info,
+ other_top_level_context_id,
+ children=0,
+ parent=None,
+ url="about:blank",
+ )
+
+
+@pytest.mark.parametrize("type_hint", ["tab", "window"])
+async def test_top_level_context(bidi_session, top_context, test_page, type_hint):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page, wait="complete"
+ )
+
+ other_top_level_context = await bidi_session.browsing_context.create(type_hint=type_hint)
+ other_top_level_context_id = other_top_level_context["context"]
+ # Retrieve all browsing contexts of the newly opened tab/window
+ contexts = await bidi_session.browsing_context.get_tree(root=other_top_level_context_id)
+
+ assert len(contexts) == 1
+ assert_browsing_context(
+ contexts[0],
+ other_top_level_context_id,
+ children=0,
+ parent=None,
+ url="about:blank",
+ )
+
+
+async def test_child_context(
+ bidi_session,
+ top_context,
+ test_page_same_origin_frame,
+ test_page_nested_frames,
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page_nested_frames, wait="complete"
+ )
+
+ # First retrieve all browsing contexts for the first tab
+ top_level_context_id = top_context["context"]
+ all_contexts = await bidi_session.browsing_context.get_tree(root=top_level_context_id)
+
+ assert len(all_contexts) == 1
+ root_info = all_contexts[0]
+ assert_browsing_context(
+ root_info,
+ top_level_context_id,
+ children=1,
+ parent=None,
+ url=test_page_nested_frames,
+ )
+
+ child1_info = root_info["children"][0]
+ assert_browsing_context(
+ child1_info,
+ context=None,
+ children=1,
+ is_root=False,
+ parent=None,
+ url=test_page_same_origin_frame,
+ )
+
+ # Now retrieve all browsing contexts for the first browsing context child
+ child_contexts = await bidi_session.browsing_context.get_tree(root=child1_info["context"])
+
+ assert len(child_contexts) == 1
+ assert_browsing_context(
+ child_contexts[0],
+ root_info["children"][0]["context"],
+ children=1,
+ parent=root_info["context"],
+ url=test_page_same_origin_frame,
+ )
+
+ assert child1_info["children"][0] == child_contexts[0]["children"][0]
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/load.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/load.py
new file mode 100644
index 0000000000..d9a2da0990
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/load/load.py
@@ -0,0 +1,110 @@
+import pytest
+from tests.support.sync import AsyncPoll
+from webdriver.error import TimeoutException
+
+from ... import int_interval
+from .. import assert_navigation_info
+
+pytestmark = pytest.mark.asyncio
+
+CONTEXT_LOAD_EVENT = "browsingContext.load"
+
+
+async def test_not_unsubscribed(bidi_session, inline, top_context):
+ await bidi_session.session.subscribe(events=[CONTEXT_LOAD_EVENT])
+ await bidi_session.session.unsubscribe(events=[CONTEXT_LOAD_EVENT])
+
+ # Track all received browsingContext.load events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener(CONTEXT_LOAD_EVENT, on_event)
+
+ url = inline("<div>foo</div>")
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=url
+ )
+
+ wait = AsyncPoll(bidi_session, timeout=0.5)
+ with pytest.raises(TimeoutException):
+ await wait.until(lambda _: len(events) > 0)
+
+ remove_listener()
+
+
+async def test_subscribe(bidi_session, subscribe_events, inline, new_tab, wait_for_event):
+ await subscribe_events(events=[CONTEXT_LOAD_EVENT])
+
+ on_entry = wait_for_event(CONTEXT_LOAD_EVENT)
+ url = inline("<div>foo</div>")
+ await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url)
+ event = await on_entry
+
+ assert_navigation_info(event, {"context": new_tab["context"], "url": url})
+
+
+async def test_timestamp(bidi_session, current_time, subscribe_events, inline, new_tab, wait_for_event):
+ await subscribe_events(events=[CONTEXT_LOAD_EVENT])
+
+ time_start = await current_time()
+
+ on_entry = wait_for_event(CONTEXT_LOAD_EVENT)
+ url = inline("<div>foo</div>")
+ await bidi_session.browsing_context.navigate(context=new_tab["context"], url=url)
+ event = await on_entry
+
+ time_end = await current_time()
+
+ assert_navigation_info(
+ event,
+ {"context": new_tab["context"], "timestamp": int_interval(time_start, time_end)}
+ )
+
+
+async def test_iframe(bidi_session, subscribe_events, new_tab, test_page, test_page_same_origin_frame):
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener(CONTEXT_LOAD_EVENT, on_event)
+ await subscribe_events(events=[CONTEXT_LOAD_EVENT])
+
+ await bidi_session.browsing_context.navigate(
+ context=new_tab["context"], url=test_page_same_origin_frame
+ )
+
+ wait = AsyncPoll(
+ bidi_session, message="Didn't receive context load events for frames"
+ )
+ await wait.until(lambda _: len(events) >= 2)
+ assert len(events) == 2
+
+ contexts = await bidi_session.browsing_context.get_tree(root=new_tab["context"])
+
+ assert len(contexts) == 1
+ root_info = contexts[0]
+ assert len(root_info["children"]) == 1
+ child_info = root_info["children"][0]
+
+ # First load event comes from iframe
+ assert_navigation_info(events[0], {"context": child_info["context"], "url": test_page})
+ assert_navigation_info(
+ events[1],
+ {"context": root_info["context"], "url": test_page_same_origin_frame}
+ )
+
+ remove_listener()
+
+
+@pytest.mark.parametrize("type_hint", ["tab", "window"])
+async def test_new_context(bidi_session, subscribe_events, wait_for_event, type_hint):
+ await subscribe_events(events=[CONTEXT_LOAD_EVENT])
+
+ on_entry = wait_for_event(CONTEXT_LOAD_EVENT)
+ new_context = await bidi_session.browsing_context.create(type_hint=type_hint)
+ event = await on_entry
+
+ assert_navigation_info(event, {"context": new_context["context"], "url": "about:blank"})
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/__init__.py
new file mode 100644
index 0000000000..9b7d28f6da
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/__init__.py
@@ -0,0 +1,25 @@
+import pytest
+
+from webdriver.bidi.error import UnknownErrorException
+
+
+async def navigate_and_assert(bidi_session, context, url, wait="complete", expected_error=False):
+ if expected_error:
+ with pytest.raises(UnknownErrorException):
+ await bidi_session.browsing_context.navigate(
+ context=context['context'], url=url, wait=wait
+ )
+
+ else:
+ result = await bidi_session.browsing_context.navigate(
+ context=context['context'], url=url, wait=wait
+ )
+ assert result["url"] == url
+
+ contexts = await bidi_session.browsing_context.get_tree(
+ root=context['context']
+ )
+ assert len(contexts) == 1
+ assert contexts[0]["url"] == url
+
+ return contexts
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/about_blank.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/about_blank.py
new file mode 100644
index 0000000000..1f6d4774ae
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/about_blank.py
@@ -0,0 +1,33 @@
+import pytest
+
+from . import navigate_and_assert
+
+pytestmark = pytest.mark.asyncio
+
+PAGE_ABOUT_BLANK = "about:blank"
+PAGE_EMPTY = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html"
+
+
+async def test_navigate_from_single_page(bidi_session, new_tab, url):
+ await navigate_and_assert(bidi_session, new_tab, url(PAGE_EMPTY))
+ await navigate_and_assert(bidi_session, new_tab, PAGE_ABOUT_BLANK)
+
+
+async def test_navigate_from_frameset(bidi_session, inline, new_tab, url):
+ frame_url = url(PAGE_EMPTY)
+    url_before = inline(f"<frameset><frame src='{frame_url}'/></frameset>")
+ await navigate_and_assert(bidi_session, new_tab, url_before)
+
+ await navigate_and_assert(bidi_session, new_tab, PAGE_ABOUT_BLANK)
+
+
+async def test_navigate_in_iframe(bidi_session, inline, new_tab):
+ frame_start_url = inline("frame")
+ url_before = inline(f"<iframe src='{frame_start_url}'></iframe>")
+ contexts = await navigate_and_assert(bidi_session, new_tab, url_before)
+
+ assert len(contexts[0]["children"]) == 1
+ frame = contexts[0]["children"][0]
+ assert frame["url"] == frame_start_url
+
+ await navigate_and_assert(bidi_session, frame, PAGE_ABOUT_BLANK)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/data_url.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/data_url.py
new file mode 100644
index 0000000000..8fd5695646
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/data_url.py
@@ -0,0 +1,101 @@
+from urllib.parse import quote
+
+import pytest
+
+from . import navigate_and_assert
+
+pytestmark = pytest.mark.asyncio
+
+
+def dataURL(doc, mime_type="text/html", charset="utf-8", isBase64=False):
+ encoding = ""
+ if charset:
+ encoding = f"charset={charset}"
+ elif isBase64:
+ encoding = "base64"
+
+ return f"data:{mime_type};{encoding},{quote(doc)}"
+
+
+HTML_BAR = dataURL("<p>bar</p>")
+HTML_FOO = dataURL("<p>foo</p>")
+IMG_BLACK_PIXEL = dataURL(
+ "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==",
+ "image/png",
+ None,
+ True,
+)
+IMG_RED_PIXEL = dataURL(
+ "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEX/TQBcNTh/AAAAAXRSTlPM0jRW/QAAAApJREFUeJxjYgAAAAYAAzY3fKgAAAAASUVORK5CYII=",
+ "image/png",
+ None,
+ True,
+)
+PAGE = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html"
+TEXT_BAR = dataURL("bar", "text/plain")
+TEXT_FOO = dataURL("foo", "text/plain")
+
+
+def wrap_content_in_url(url, content):
+ """Check if content is not data url and wrap it in the url function"""
+ if content.startswith("data:"):
+ return content
+ return url(content)
+
+
+@pytest.mark.parametrize(
+ "url_before, url_after",
+ [
+ (PAGE, IMG_BLACK_PIXEL),
+ (IMG_BLACK_PIXEL, IMG_RED_PIXEL),
+ (IMG_BLACK_PIXEL, HTML_FOO),
+ (IMG_BLACK_PIXEL, PAGE),
+ (PAGE, HTML_FOO),
+ (HTML_FOO, TEXT_FOO),
+ (HTML_FOO, HTML_BAR),
+ (HTML_FOO, PAGE),
+ (PAGE, TEXT_FOO),
+ (TEXT_FOO, TEXT_BAR),
+ (TEXT_FOO, IMG_BLACK_PIXEL),
+ (TEXT_FOO, PAGE),
+ ],
+ ids=[
+ "document to data:image",
+ "data:image to data:image",
+ "data:image to data:html",
+ "data:image to document",
+ "document to data:html",
+ "data:html to data:html",
+ "data:html to data:text",
+ "data:html to document",
+ "document to data:text",
+ "data:text to data:text",
+ "data:text to data:image",
+ "data:text to document",
+ ],
+)
+async def test_navigate_from_single_page(
+ bidi_session, new_tab, url, url_before, url_after
+):
+ await navigate_and_assert(
+ bidi_session,
+ new_tab,
+ wrap_content_in_url(url, url_before),
+ )
+ await navigate_and_assert(
+ bidi_session,
+ new_tab,
+ wrap_content_in_url(url, url_after),
+ )
+
+
+async def test_navigate_in_iframe(bidi_session, inline, new_tab):
+ frame_start_url = inline("frame")
+ url_before = inline(f"<iframe src='{frame_start_url}'></iframe>")
+ contexts = await navigate_and_assert(bidi_session, new_tab, url_before)
+
+ assert len(contexts[0]["children"]) == 1
+ frame = contexts[0]["children"][0]
+ assert frame["url"] == frame_start_url
+
+ await navigate_and_assert(bidi_session, frame, HTML_BAR)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/error.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/error.py
new file mode 100644
index 0000000000..b5d9a9d8fe
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/error.py
@@ -0,0 +1,22 @@
+import pytest
+
+from . import navigate_and_assert
+
+pytestmark = pytest.mark.asyncio
+
+
+@pytest.mark.parametrize(
+ "url",
+ [
+ "thisprotocoldoesnotexist://",
+ "http://doesnotexist.localhost/",
+ "http://localhost:0",
+ ],
+ ids=[
+ "protocol",
+ "host",
+ "port",
+ ]
+)
+async def test_invalid_address(bidi_session, new_tab, url):
+ await navigate_and_assert(bidi_session, new_tab, url, expected_error=True)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/frame.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/frame.py
new file mode 100644
index 0000000000..2c2131b6ee
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/frame.py
@@ -0,0 +1,59 @@
+import pytest
+
+from . import navigate_and_assert
+
+pytestmark = pytest.mark.asyncio
+
+PAGE_CONTENT = "<div>foo</div>"
+
+
+@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"])
+async def test_origin(bidi_session, new_tab, inline, domain):
+ frame_start_url = inline("frame")
+ url_before = inline(f"<iframe src='{frame_start_url}'></iframe>", domain=domain)
+ contexts = await navigate_and_assert(bidi_session, new_tab, url_before)
+
+ assert len(contexts[0]["children"]) == 1
+ frame = contexts[0]["children"][0]
+ assert frame["url"] == frame_start_url
+
+ await navigate_and_assert(bidi_session, frame, inline(PAGE_CONTENT))
+
+
+async def test_multiple_frames(
+ bidi_session, new_tab, test_page_multiple_frames, test_page, test_page2, inline
+):
+ contexts = await navigate_and_assert(
+ bidi_session, new_tab, test_page_multiple_frames
+ )
+
+ assert len(contexts[0]["children"]) == 2
+ frame = contexts[0]["children"][0]
+ assert frame["url"] == test_page
+
+ await navigate_and_assert(bidi_session, frame, inline(PAGE_CONTENT))
+
+    # Make sure that the second frame hasn't been navigated
+ contexts = await bidi_session.browsing_context.get_tree(root=new_tab["context"])
+ assert contexts[0]["children"][1]["url"] == test_page2
+
+
+async def test_nested_frames(
+ bidi_session,
+ new_tab,
+ inline,
+ test_page_nested_frames,
+ test_page_same_origin_frame,
+ test_page,
+):
+ contexts = await navigate_and_assert(bidi_session, new_tab, test_page_nested_frames)
+
+ assert len(contexts[0]["children"]) == 1
+ frame_level_1 = contexts[0]["children"][0]
+ assert frame_level_1["url"] == test_page_same_origin_frame
+
+ assert len(frame_level_1["children"]) == 1
+ frame_level_2 = frame_level_1["children"][0]
+ assert frame_level_2["url"] == test_page
+
+ await navigate_and_assert(bidi_session, frame_level_2, inline(PAGE_CONTENT))
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/hash.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/hash.py
new file mode 100644
index 0000000000..d4862a6201
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/hash.py
@@ -0,0 +1,62 @@
+import pytest
+
+from . import navigate_and_assert
+
+pytestmark = pytest.mark.asyncio
+
+PAGE_EMPTY = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html"
+PAGE_EMPTY_WITH_HASH_FOO = f"{PAGE_EMPTY}#foo"
+PAGE_OTHER = "/webdriver/tests/bidi/browsing_context/navigate/support/other.html"
+
+
+@pytest.mark.parametrize(
+ "hash_before, hash_after",
+ [
+ ("", "#foo"),
+ ("#foo", "#bar"),
+ ("#foo", "#foo"),
+ ("#bar", ""),
+ ],
+ ids=[
+ "without hash to with hash",
+ "with different hashes",
+ "with identical hashes",
+ "with hash to without hash",
+ ],
+)
+async def test_navigate_in_the_same_document(
+ bidi_session, new_tab, url, hash_before, hash_after
+):
+ await navigate_and_assert(bidi_session, new_tab, url(PAGE_EMPTY + hash_before))
+ await navigate_and_assert(bidi_session, new_tab, url(PAGE_EMPTY + hash_after))
+
+
+@pytest.mark.parametrize(
+ "url_before, url_after",
+ [
+ (PAGE_EMPTY_WITH_HASH_FOO, f"{PAGE_OTHER}#foo"),
+ (PAGE_EMPTY_WITH_HASH_FOO, f"{PAGE_OTHER}#bar"),
+ ],
+ ids=[
+ "with identical hashes",
+ "with different hashes",
+ ],
+)
+async def test_navigate_different_documents(
+ bidi_session, new_tab, url, url_before, url_after
+):
+ await navigate_and_assert(bidi_session, new_tab, url(url_before))
+ await navigate_and_assert(bidi_session, new_tab, url(url_after))
+
+
+async def test_navigate_in_iframe(bidi_session, inline, new_tab):
+ frame_start_url = inline("frame")
+ url_before = inline(f"<iframe src='{frame_start_url}'></iframe>")
+ contexts = await navigate_and_assert(bidi_session, new_tab, url_before)
+
+ assert len(contexts[0]["children"]) == 1
+ frame = contexts[0]["children"][0]
+ assert frame["url"] == frame_start_url
+
+ url_after = f"{frame_start_url}#foo"
+ await navigate_and_assert(bidi_session, frame, url_after)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/image.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/image.py
new file mode 100644
index 0000000000..b52ea9787c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/image.py
@@ -0,0 +1,56 @@
+import pytest
+
+from . import navigate_and_assert
+
+pytestmark = pytest.mark.asyncio
+
+PAGE_EMPTY = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html"
+PNG_BLACK_DOT = "/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.png"
+PNG_RED_DOT = "/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.png"
+SVG = "/webdriver/tests/bidi/browsing_context/navigate/support/other.svg"
+
+
+@pytest.mark.parametrize(
+ "url_before, url_after",
+ [
+ (PAGE_EMPTY, SVG),
+ (SVG, PAGE_EMPTY),
+ (PAGE_EMPTY, PNG_BLACK_DOT),
+ (PNG_BLACK_DOT, PNG_RED_DOT),
+ (PNG_RED_DOT, SVG),
+ (PNG_BLACK_DOT, PAGE_EMPTY),
+ ],
+ ids=[
+ "document to svg",
+ "svg to document",
+ "document to png",
+ "png to png",
+ "png to svg",
+ "png to document",
+ ],
+)
+async def test_navigate_between_img_and_html(
+ bidi_session, new_tab, url, url_before, url_after
+):
+ await navigate_and_assert(bidi_session, new_tab, url(url_before))
+ await navigate_and_assert(bidi_session, new_tab, url(url_after))
+
+
+@pytest.mark.parametrize(
+ "img",
+ [SVG, PNG_BLACK_DOT],
+ ids=[
+ "to svg",
+ "to png",
+ ],
+)
+async def test_navigate_in_iframe(bidi_session, new_tab, inline, url, img):
+ frame_start_url = inline("frame")
+ url_before = inline(f"<iframe src='{frame_start_url}'></iframe>")
+ contexts = await navigate_and_assert(bidi_session, new_tab, url_before)
+
+ assert len(contexts[0]["children"]) == 1
+ frame = contexts[0]["children"][0]
+ assert frame["url"] == frame_start_url
+
+ await navigate_and_assert(bidi_session, frame, url(img))
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/invalid.py
new file mode 100644
index 0000000000..3ea45f0666
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/invalid.py
@@ -0,0 +1,52 @@
+import pytest
+import webdriver.bidi.error as error
+
+pytestmark = pytest.mark.asyncio
+
+
+@pytest.mark.parametrize("value", [None, False, 42, {}, []])
+async def test_params_context_invalid_type(bidi_session, inline, value):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.browsing_context.navigate(
+ context=value, url=inline("<p>foo")
+ )
+
+
+@pytest.mark.parametrize("value", ["", "somestring"])
+async def test_params_context_invalid_value(bidi_session, inline, value):
+ with pytest.raises(error.NoSuchFrameException):
+ await bidi_session.browsing_context.navigate(
+ context=value, url=inline("<p>foo")
+ )
+
+
+@pytest.mark.parametrize("value", [None, False, 42, {}, []])
+async def test_params_url_invalid_type(bidi_session, new_tab, value):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.browsing_context.navigate(
+ context=new_tab["context"], url=value
+ )
+
+
+@pytest.mark.parametrize("value", ["http://:invalid", "http://#invalid"])
+async def test_params_url_invalid_value(bidi_session, new_tab, value):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.browsing_context.navigate(
+ context=new_tab["context"], url=value
+ )
+
+
+@pytest.mark.parametrize("value", [False, 42, {}, []])
+async def test_params_wait_invalid_type(bidi_session, inline, new_tab, value):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.browsing_context.navigate(
+ context=new_tab["context"], url=inline("<p>bar"), wait=value
+ )
+
+
+@pytest.mark.parametrize("value", ["", "somestring"])
+async def test_params_wait_invalid_value(bidi_session, inline, new_tab, value):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.browsing_context.navigate(
+ context=new_tab["context"], url=inline("<p>bar"), wait=value
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/navigate.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/navigate.py
new file mode 100644
index 0000000000..a35f2728ef
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/navigate.py
@@ -0,0 +1,83 @@
+import asyncio
+
+import pytest
+
+from . import navigate_and_assert
+
+pytestmark = pytest.mark.asyncio
+
+
+async def test_payload(bidi_session, inline, new_tab):
+ url = inline("<div>foo</div>")
+ result = await bidi_session.browsing_context.navigate(
+ context=new_tab["context"], url=url
+ )
+
+ assert "navigation" in result
+ assert result["url"] == url
+
+
+async def test_interactive_simultaneous_navigation(bidi_session, inline, new_tab):
+ frame1_start_url = inline("frame1")
+ frame2_start_url = inline("frame2")
+
+ url = inline(
+ f"<iframe src='{frame1_start_url}'></iframe><iframe src='{frame2_start_url}'></iframe>"
+ )
+
+ contexts = await navigate_and_assert(bidi_session, new_tab, url)
+ assert len(contexts[0]["children"]) == 2
+
+ frame1_context_id = contexts[0]["children"][0]["context"]
+ frame2_context_id = contexts[0]["children"][1]["context"]
+
+ # The goal here is to navigate both iframes in parallel, and to use the
+ # interactive wait condition for both.
+    # Make sure that monitoring the DOMContentLoaded event for one frame does
+    # not prevent monitoring it for the other frame.
+ img_url = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg"
+ script_url = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.js"
+ # frame1 also has a slow loading image so that it won't reach a complete
+ # navigation, and we can make sure we resolved with the interactive state.
+ frame1_url = inline(
+ f"""frame1_new<script src='{script_url}?pipe=trickle(d2)'></script>
+ <img src='{img_url}?pipe=trickle(d100)'>
+ """
+ )
+ frame2_url = inline(
+ f"frame2_new<script src='{script_url}?pipe=trickle(d0.5)'></script>"
+ )
+
+ frame1_task = asyncio.ensure_future(
+ bidi_session.browsing_context.navigate(
+ context=frame1_context_id, url=frame1_url, wait="interactive"
+ )
+ )
+
+ frame2_result = await bidi_session.browsing_context.navigate(
+ context=frame2_context_id, url=frame2_url, wait="interactive"
+ )
+ assert frame2_result["url"] == frame2_url
+
+    # The "interactive" navigation should resolve before the 5-second timeout.
+ await asyncio.wait_for(frame1_task, timeout=5)
+
+ frame1_result = frame1_task.result()
+ assert frame1_result["url"] == frame1_url
+
+ contexts = await bidi_session.browsing_context.get_tree(root=new_tab["context"])
+ assert contexts[0]["children"][0]["url"] == frame1_url
+ assert contexts[0]["children"][1]["url"] == frame2_url
+
+
+async def test_relative_url(bidi_session, new_tab, url):
+ url_before = url(
+ "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html"
+ )
+
+    # Navigate to the first page with wait=interactive to make sure the
+    # document's base URI was updated.
+ await navigate_and_assert(bidi_session, new_tab, url_before, "interactive")
+
+ url_after = url_before.replace("empty.html", "other.html")
+ await navigate_and_assert(bidi_session, new_tab, url_after, "interactive")
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.png b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.png
new file mode 100644
index 0000000000..613754cfaf
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/black_dot.png
Binary files differ
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.html b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.html
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.html
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.js b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.js
new file mode 100644
index 0000000000..3918c74e44
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.js
@@ -0,0 +1 @@
+"use strict";
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg
new file mode 100644
index 0000000000..e0af766e8f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg
@@ -0,0 +1,2 @@
+<svg xmlns="http://www.w3.org/2000/svg">
+</svg>
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.html b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.html
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.html
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.svg b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.svg
new file mode 100644
index 0000000000..7c20a99a4b
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/other.svg
@@ -0,0 +1,3 @@
+<svg xmlns="http://www.w3.org/2000/svg" width="100" height="100">
+ <rect x="10" y="10" width="100" height="100" style="fill: LightSkyBlue" />
+</svg>
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.png b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.png
new file mode 100644
index 0000000000..c5916f2897
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/support/red_dot.png
Binary files differ
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/wait.py b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/wait.py
new file mode 100644
index 0000000000..9a0b14e755
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/browsing_context/navigate/wait.py
@@ -0,0 +1,98 @@
+import pytest
+import asyncio
+
+pytestmark = pytest.mark.asyncio
+
+
+async def wait_for_navigation(bidi_session, context, url, wait, expect_timeout):
+ # Ultimately, "interactive" and "complete" should support a timeout argument.
+ # See https://github.com/w3c/webdriver-bidi/issues/188.
+ if expect_timeout:
+ with pytest.raises(asyncio.TimeoutError):
+ await asyncio.wait_for(
+ asyncio.shield(bidi_session.browsing_context.navigate(
+ context=context, url=url, wait=wait
+ )),
+ timeout=1,
+ )
+ else:
+ await bidi_session.browsing_context.navigate(
+ context=context, url=url, wait=wait
+ )
+
+
+@pytest.mark.parametrize("value", ["none", "interactive", "complete"])
+async def test_expected_url(bidi_session, inline, new_tab, value):
+ url = inline("<div>foo</div>")
+ result = await bidi_session.browsing_context.navigate(
+ context=new_tab["context"], url=url, wait=value
+ )
+ assert result["url"] == url
+ if value != "none":
+ contexts = await bidi_session.browsing_context.get_tree(
+ root=new_tab["context"], max_depth=0
+ )
+ assert contexts[0]["url"] == url
+
+
+@pytest.mark.parametrize(
+ "wait, expect_timeout",
+ [
+ ("none", False),
+ ("interactive", False),
+ ("complete", True),
+ ],
+)
+async def test_slow_image_blocks_load(bidi_session, inline, new_tab, wait, expect_timeout):
+ script_url = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.svg"
+ url = inline(f"<img src='{script_url}?pipe=trickle(d10)'>")
+
+ await wait_for_navigation(bidi_session, new_tab["context"], url, wait, expect_timeout)
+
+ # We cannot assert the URL for "none" by definition, and for "complete", since
+    # we expect a timeout. For the timeout case, the wait_for_navigation helper
+    # resumes after 1 second, and there is no guarantee that the URL has been updated.
+ if wait == "interactive":
+ contexts = await bidi_session.browsing_context.get_tree(
+ root=new_tab["context"], max_depth=0
+ )
+ assert contexts[0]["url"] == url
+
+
+@pytest.mark.parametrize(
+ "wait, expect_timeout",
+ [
+ ("none", False),
+ ("interactive", True),
+ ("complete", True),
+ ],
+)
+async def test_slow_page(bidi_session, new_tab, url, wait, expect_timeout):
+ page_url = url(
+ "/webdriver/tests/bidi/browsing_context/navigate/support/empty.html?pipe=trickle(d10)"
+ )
+
+ await wait_for_navigation(bidi_session, new_tab["context"], page_url, wait, expect_timeout)
+
+ # Note that we cannot assert the top context url here, because the navigation
+ # is blocked on the initial url for this test case.
+
+
+@pytest.mark.parametrize(
+ "wait, expect_timeout",
+ [
+ ("none", False),
+ ("interactive", True),
+ ("complete", True),
+ ],
+)
+async def test_slow_script_blocks_domContentLoaded(bidi_session, inline, new_tab, wait, expect_timeout):
+ script_url = "/webdriver/tests/bidi/browsing_context/navigate/support/empty.js"
+ url = inline(f"<script src='{script_url}?pipe=trickle(d10)'></script>")
+
+ await wait_for_navigation(bidi_session, new_tab["context"], url, wait, expect_timeout)
+
+ # In theory we could also assert the top context URL has been updated here,
+ # but since we expect both "interactive" and "complete" to timeout the
+ # wait_for_navigation helper will resume arbitrarily after 1 second, and
+ # there is no guarantee that the URL has been updated.
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/conftest.py b/testing/web-platform/tests/webdriver/tests/bidi/conftest.py
new file mode 100644
index 0000000000..e4ff5a5ce4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/conftest.py
@@ -0,0 +1,48 @@
+import pytest
+
+
+@pytest.fixture
+def test_origin(url):
+ return url("")
+
+
+@pytest.fixture
+def test_alt_origin(url):
+ return url("", domain="alt")
+
+
+@pytest.fixture
+def test_page(inline):
+ return inline("<div>foo</div>")
+
+
+@pytest.fixture
+def test_page2(inline):
+ return inline("<div>bar</div>")
+
+
+@pytest.fixture
+def test_page_cross_origin(inline):
+ return inline("<div>bar</div>", domain="alt")
+
+
+@pytest.fixture
+def test_page_multiple_frames(inline, test_page, test_page2):
+ return inline(
+ f"<iframe src='{test_page}'></iframe><iframe src='{test_page2}'></iframe>"
+ )
+
+
+@pytest.fixture
+def test_page_nested_frames(inline, test_page_same_origin_frame):
+ return inline(f"<iframe src='{test_page_same_origin_frame}'></iframe>")
+
+
+@pytest.fixture
+def test_page_cross_origin_frame(inline, test_page_cross_origin):
+ return inline(f"<iframe src='{test_page_cross_origin}'></iframe>")
+
+
+@pytest.fixture
+def test_page_same_origin_frame(inline, test_page):
+ return inline(f"<iframe src='{test_page}'></iframe>")
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/errors/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/errors/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/errors/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/errors/errors.py b/testing/web-platform/tests/webdriver/tests/bidi/errors/errors.py
new file mode 100644
index 0000000000..0d6e9ea0e1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/errors/errors.py
@@ -0,0 +1,16 @@
+import pytest
+
+from webdriver.bidi.error import UnknownCommandException
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("module_name, command_name", [
+ ("invalidmodule", "somecommand"),
+ ("session", "wrongcommand"),
+], ids=[
+ 'invalid module',
+ 'invalid command name',
+])
+async def test_unknown_command(bidi_session, send_blocking_command, module_name, command_name):
+ with pytest.raises(UnknownCommandException):
+ await send_blocking_command(f"{module_name}.{command_name}", {})
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/log/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/log/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/__init__.py
new file mode 100644
index 0000000000..e730e71fc6
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/__init__.py
@@ -0,0 +1,129 @@
+from webdriver.bidi.modules.script import ContextTarget
+
+from ... import (
+ any_int,
+ any_list,
+ any_string,
+ create_console_api_message,
+ recursive_compare,
+)
+
+
+def assert_base_entry(
+ entry,
+ level=any_string,
+ text=any_string,
+ timestamp=any_int,
+ realm=any_string,
+ context=None,
+ stacktrace=None
+):
+ recursive_compare({
+ "level": level,
+ "text": text,
+ "timestamp": timestamp,
+ "source": {
+ "realm": realm
+ }
+ }, entry)
+
+ if stacktrace is not None:
+ assert "stackTrace" in entry
+ assert isinstance(entry["stackTrace"], object)
+ assert "callFrames" in entry["stackTrace"]
+
+ call_frames = entry["stackTrace"]["callFrames"]
+ assert isinstance(call_frames, list)
+ assert len(call_frames) == len(stacktrace)
+ for index in range(0, len(call_frames)):
+ assert call_frames[index] == stacktrace[index]
+
+ source = entry["source"]
+ if context is not None:
+ assert "context" in source
+ assert source["context"] == context
+
+
+def assert_console_entry(
+ entry,
+ method=any_string,
+ level=any_string,
+ text=any_string,
+ args=any_list,
+ timestamp=any_int,
+ realm=any_string,
+ context=None,
+ stacktrace=None
+):
+ assert_base_entry(
+ entry=entry,
+ level=level,
+ text=text,
+ timestamp=timestamp,
+ realm=realm,
+ context=context,
+ stacktrace=stacktrace)
+
+ recursive_compare({
+ "type": "console",
+ "method": method,
+ "args": args
+ }, entry)
+
+
+def assert_javascript_entry(
+ entry,
+ level=any_string,
+ text=any_string,
+ timestamp=any_int,
+ realm=any_string,
+ context=None,
+ stacktrace=None
+):
+ assert_base_entry(
+ entry=entry,
+ level=level,
+ text=text,
+ timestamp=timestamp,
+ realm=realm,
+ stacktrace=stacktrace,
+ context=context)
+
+ recursive_compare({
+ "type": "javascript",
+ }, entry)
+
+
+async def create_console_api_message_for_primitive_value(bidi_session, context, type, value):
+ await bidi_session.script.evaluate(
+ expression=f"""console.{type}({value})""",
+ await_promise=False,
+ target=ContextTarget(context["context"]),
+ )
+
+
+async def create_javascript_error(bidi_session, context, error_message="foo"):
+ str_remote_value = {"type": "string", "value": error_message}
+
+ result = await bidi_session.script.call_function(
+ function_declaration="""(error_message) => {
+ const script = document.createElement("script");
+ script.append(document.createTextNode(`(() => { throw new Error("${error_message}") })()`));
+ document.body.append(script);
+
+ const err = new Error(error_message);
+ return err.toString();
+ }""",
+ arguments=[str_remote_value],
+ await_promise=False,
+ target=ContextTarget(context["context"]),
+ )
+
+ return result["value"]
+
+
+def create_log(bidi_session, context, log_type, text="foo"):
+ if log_type == "console_api_log":
+ return create_console_api_message(bidi_session, context, text)
+ if log_type == "javascript_error":
+ return create_javascript_error(bidi_session, context, text)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console.py
new file mode 100644
index 0000000000..b66057eec0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console.py
@@ -0,0 +1,170 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget
+
+from . import assert_console_entry, create_console_api_message_for_primitive_value
+from ... import any_string, int_interval, recursive_compare
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "log_argument, expected_text",
+ [
+ ("'TEST'", "TEST"),
+ ("'TWO', 'PARAMETERS'", "TWO PARAMETERS"),
+ ("{}", any_string),
+ ("['1', '2', '3']", any_string),
+ ("null, undefined", "null undefined"),
+ ],
+ ids=[
+ "single string",
+ "two strings",
+ "empty object",
+ "array of strings",
+ "null and undefined",
+ ],
+)
+async def test_text_with_argument_variation(
+ bidi_session, top_context, wait_for_event, log_argument, expected_text,
+):
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message_for_primitive_value(
+ bidi_session, top_context, "log", log_argument)
+ event_data = await on_entry_added
+
+ assert_console_entry(event_data, text=expected_text, context=top_context["context"])
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "log_method, expected_level",
+ [
+ ("assert", "error"),
+ ("debug", "debug"),
+ ("error", "error"),
+ ("info", "info"),
+ ("log", "info"),
+ ("table", "info"),
+ ("trace", "debug"),
+ ("warn", "warn"),
+ ],
+)
+async def test_level(
+ bidi_session, top_context, wait_for_event, log_method, expected_level
+):
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+
+ if log_method == "assert":
+ # assert has to be called with a first falsy argument to trigger a log.
+ await create_console_api_message_for_primitive_value(
+ bidi_session, top_context, "assert", "false, 'foo'")
+ else:
+ await create_console_api_message_for_primitive_value(
+ bidi_session, top_context, log_method, "'foo'")
+
+ event_data = await on_entry_added
+
+ assert_console_entry(
+ event_data, text="foo", level=expected_level, method=log_method
+ )
+
+
+@pytest.mark.asyncio
+async def test_timestamp(bidi_session, top_context, wait_for_event, current_time):
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+
+ time_start = await current_time()
+
+ script = """new Promise(resolve => {
+ setTimeout(() => {
+ console.log('foo');
+ resolve();
+ }, 100);
+ });
+ """
+ await bidi_session.script.evaluate(
+ expression=script,
+ await_promise=True,
+ target=ContextTarget(top_context["context"]),
+ )
+
+ event_data = await on_entry_added
+
+ time_end = await current_time()
+
+ assert_console_entry(event_data, text="foo", timestamp=int_interval(time_start, time_end))
+
+
+@pytest.mark.asyncio
+async def test_new_context_with_new_window(bidi_session, top_context, wait_for_event):
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message_for_primitive_value(
+ bidi_session, top_context, 'log', "'foo'")
+ event_data = await on_entry_added
+ assert_console_entry(event_data, text="foo", context=top_context["context"])
+
+ new_context = await bidi_session.browsing_context.create(type_hint="tab")
+
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message_for_primitive_value(
+ bidi_session, new_context, 'log', "'foo_in_new_window'")
+ event_data = await on_entry_added
+ assert_console_entry(event_data, text="foo_in_new_window", context=new_context["context"])
+
+
+@pytest.mark.asyncio
+async def test_new_context_with_refresh(bidi_session, top_context, wait_for_event):
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message_for_primitive_value(
+ bidi_session, top_context, 'log', "'foo'")
+ event_data = await on_entry_added
+ assert_console_entry(event_data, text="foo", context=top_context["context"])
+
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=top_context["url"], wait="complete"
+ )
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message_for_primitive_value(
+ bidi_session, top_context, 'log', "'foo_after_refresh'")
+ event_data = await on_entry_added
+ assert_console_entry(
+ event_data, text="foo_after_refresh", context=top_context["context"]
+ )
+
+
+@pytest.mark.asyncio
+async def test_different_contexts(
+ bidi_session,
+ top_context,
+ wait_for_event,
+ test_page_same_origin_frame,
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page_same_origin_frame, wait="complete"
+ )
+ contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"])
+ assert len(contexts[0]["children"]) == 1
+ frame_context = contexts[0]["children"][0]
+
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message_for_primitive_value(
+ bidi_session, top_context, "log", "'foo'")
+ event_data = await on_entry_added
+ assert_console_entry(event_data, text="foo", context=top_context["context"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message_for_primitive_value(
+ bidi_session, frame_context, "log", "'bar'")
+ event_data = await on_entry_added
+ assert_console_entry(event_data, text="bar", context=frame_context["context"])
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console_args.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console_args.py
new file mode 100644
index 0000000000..857d9f7b17
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/console_args.py
@@ -0,0 +1,48 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget
+
+from . import assert_console_entry, create_console_api_message_for_primitive_value
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("data,remote_value", [
+ ("undefined", {"type": "undefined"}),
+ ("null", {"type": "null"}),
+ ("'bar'", {"type": "string", "value": "bar"}),
+ ("42", {"type": "number", "value": 42}),
+ ("Number.NaN", {"type": "number", "value": "NaN"}),
+ ("-0", {"type": "number", "value": "-0"}),
+ ("Number.POSITIVE_INFINITY", {"type": "number", "value": "Infinity"}),
+ ("Number.NEGATIVE_INFINITY", {"type": "number", "value": "-Infinity"}),
+ ("false", {"type": "boolean", "value": False}),
+ ("42n", {"type": "bigint", "value": "42"}),
+], ids=[
+ "undefined",
+ "null",
+ "string",
+ "number",
+ "NaN",
+ "-0",
+ "Infinity",
+ "-Infinity",
+ "boolean",
+ "bigint",
+])
+async def test_primitive_types(
+ bidi_session, top_context, wait_for_event, data, remote_value
+):
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message_for_primitive_value(
+ bidi_session, top_context, "log", f"'foo', {data}")
+ event_data = await on_entry_added
+ args = [
+ {"type": "string", "value": "foo"},
+ {"type": remote_value["type"]},
+ ]
+ if "value" in remote_value:
+ args[1].update({"value": remote_value["value"]})
+
+ # First arg is always the first argument as provided to console.log()
+ assert_console_entry(event_data, args=args)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/event_buffer.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/event_buffer.py
new file mode 100644
index 0000000000..69c96c8c54
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/event_buffer.py
@@ -0,0 +1,96 @@
+import asyncio
+
+import pytest
+
+from . import assert_base_entry, create_log
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("log_type", ["console_api_log", "javascript_error"])
+async def test_console_log_cached_messages(
+ bidi_session, wait_for_event, log_type, new_tab
+):
+ # Clear events buffer.
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+ await bidi_session.session.unsubscribe(events=["log.entryAdded"])
+
+ # Log a message before subscribing
+ expected_text = await create_log(bidi_session, new_tab, log_type, "cached_message")
+
+ # Track all received log.entryAdded events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
+
+ # Subscribe
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+ # Cached events are emitted before the subscribe command is finished.
+ assert len(events) == 1
+
+ # Check the log.entryAdded event received has the expected properties.
+ assert_base_entry(events[0], text=expected_text, context=new_tab["context"])
+
+ # Unsubscribe and re-subscribe
+ await bidi_session.session.unsubscribe(events=["log.entryAdded"])
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ # Check that the cached event was not re-emitted.
+ assert len(events) == 1
+
+ on_entry_added = wait_for_event("log.entryAdded")
+ expected_text = await create_log(bidi_session, new_tab, log_type, "live_message")
+ await on_entry_added
+
+ # Check that we only received the live message.
+ assert len(events) == 2
+ assert_base_entry(events[1], text=expected_text, context=new_tab["context"])
+
+ # Unsubscribe, log a message and re-subscribe
+ await bidi_session.session.unsubscribe(events=["log.entryAdded"])
+ expected_text = await create_log(bidi_session, new_tab, log_type, "cached_message_2")
+
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ # Check that only the newly cached event was emitted
+ assert len(events) == 3
+ assert_base_entry(events[2], text=expected_text, context=new_tab["context"])
+
+ await bidi_session.session.unsubscribe(events=["log.entryAdded"])
+ remove_listener()
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("log_type", ["console_api_log", "javascript_error"])
+async def test_console_log_cached_message_after_refresh(
+ bidi_session, subscribe_events, new_tab, log_type
+):
+ # Clear events buffer.
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+ await bidi_session.session.unsubscribe(events=["log.entryAdded"])
+
+ # Track all received log.entryAdded events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
+
+ # Log a message, refresh, log another message and subscribe
+ expected_text_1 = await create_log(bidi_session, new_tab, log_type, "cached_message_1")
+ await bidi_session.browsing_context.navigate(
+ context=new_tab["context"], url=new_tab["url"], wait="complete"
+ )
+ expected_text_2 = await create_log(bidi_session, new_tab, log_type, "cached_message_2")
+
+ await subscribe_events(events=["log.entryAdded"])
+
+ # Check that only the cached message was retrieved.
+ assert len(events) == 2
+ assert_base_entry(events[0], text=expected_text_1)
+ assert_base_entry(events[1], text=expected_text_2)
+
+ remove_listener()
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/javascript.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/javascript.py
new file mode 100644
index 0000000000..fe8a9b6b58
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/javascript.py
@@ -0,0 +1,31 @@
+import math
+import time
+
+import pytest
+
+from . import assert_javascript_entry, create_log
+from ... import int_interval
+
+
+@pytest.mark.asyncio
+async def test_types_and_values(
+ bidi_session, current_time, inline, top_context, wait_for_event
+):
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+
+ time_start = await current_time()
+
+ expected_text = await create_log(bidi_session, top_context, "javascript_error", "cached_message")
+ event_data = await on_entry_added
+
+ time_end = await current_time()
+
+ assert_javascript_entry(
+ event_data,
+ level="error",
+ text=expected_text,
+ timestamp=int_interval(time_start, time_end),
+ context=top_context["context"],
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/stacktrace.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/stacktrace.py
new file mode 100644
index 0000000000..d226476ef7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/stacktrace.py
@@ -0,0 +1,121 @@
+import pytest
+
+from . import assert_console_entry, assert_javascript_entry
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "log_method, expect_stack",
+ [
+ ("assert", True),
+ ("debug", False),
+ ("error", True),
+ ("info", False),
+ ("log", False),
+ ("table", False),
+ ("trace", True),
+ ("warn", True),
+ ],
+)
+async def test_console_entry_sync_callstack(
+ bidi_session, inline, top_context, wait_for_event, log_method, expect_stack
+):
+ if log_method == "assert":
+ # assert has to be called with a first falsy argument to trigger a log.
+ url = inline(
+ f"""
+ <script>
+ function foo() {{ console.{log_method}(false, "cheese"); }}
+ function bar() {{ foo(); }}
+ bar();
+ </script>
+ """
+ )
+ else:
+ url = inline(
+ f"""
+ <script>
+ function foo() {{ console.{log_method}("cheese"); }}
+ function bar() {{ foo(); }}
+ bar();
+ </script>
+ """
+ )
+
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+
+ if expect_stack:
+ expected_stack = [
+ {"columnNumber": 41, "functionName": "foo", "lineNumber": 4, "url": url},
+ {"columnNumber": 33, "functionName": "bar", "lineNumber": 5, "url": url},
+ {"columnNumber": 16, "functionName": "", "lineNumber": 6, "url": url},
+ ]
+ else:
+ expected_stack = None
+
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=url, wait="complete"
+ )
+
+ event_data = await on_entry_added
+
+ assert_console_entry(
+ event_data,
+ method=log_method,
+ text="cheese",
+ stacktrace=expected_stack,
+ context=top_context["context"],
+ )
+
+ # Navigate to a page with no error to avoid polluting the next tests with
+ # JavaScript errors.
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=inline("<p>foo"), wait="complete"
+ )
+
+
+@pytest.mark.asyncio
+async def test_javascript_entry_sync_callstack(
+ bidi_session, inline, top_context, wait_for_event
+):
+ url = inline(
+ """
+ <script>
+ function foo() { throw new Error("cheese"); }
+ function bar() { foo(); }
+ bar();
+ </script>
+ """
+ )
+
+ await bidi_session.session.subscribe(events=["log.entryAdded"])
+
+ on_entry_added = wait_for_event("log.entryAdded")
+
+ expected_stack = [
+ {"columnNumber": 35, "functionName": "foo", "lineNumber": 4, "url": url},
+ {"columnNumber": 29, "functionName": "bar", "lineNumber": 5, "url": url},
+ {"columnNumber": 12, "functionName": "", "lineNumber": 6, "url": url},
+ ]
+
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=url, wait="complete"
+ )
+
+ event_data = await on_entry_added
+
+ assert_javascript_entry(
+ event_data,
+ level="error",
+ text="Error: cheese",
+ stacktrace=expected_stack,
+ context=top_context["context"],
+ )
+
+ # Navigate to a page with no error to avoid polluting the next tests with
+ # JavaScript errors.
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=inline("<p>foo"), wait="complete"
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/subscription.py b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/subscription.py
new file mode 100644
index 0000000000..d23a4cf83e
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/log/entry_added/subscription.py
@@ -0,0 +1,108 @@
+import asyncio
+
+import pytest
+
+from . import assert_base_entry, create_log
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize("log_type", ["console_api_log", "javascript_error"])
async def test_subscribe_twice(bidi_session, new_tab, wait_for_event, log_type):
    """Subscribing twice to log.entryAdded must deliver each event only once."""
    for _ in range(2):
        await bidi_session.session.subscribe(events=["log.entryAdded"])

    # Collect every log.entryAdded event that is delivered.
    received = []

    async def collect_event(method, data):
        received.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", collect_event)

    # Trigger a single log entry and wait for the corresponding event.
    on_entry_added = wait_for_event("log.entryAdded")
    expected_text = await create_log(bidi_session, new_tab, log_type, "text1")
    await on_entry_added

    assert len(received) == 1
    assert_base_entry(received[0], text=expected_text)

    # Give a potential duplicate some time to arrive, then check again.
    await asyncio.sleep(0.5)
    assert len(received) == 1

    remove_listener()
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize("log_type", ["console_api_log", "javascript_error"])
async def test_subscribe_unsubscribe(bidi_session, new_tab, wait_for_event, log_type):
    """Entries created while unsubscribed are buffered and replayed when
    subscribing again; afterwards new contexts also emit events."""
    # Subscribe for log events globally
    await bidi_session.session.subscribe(events=["log.entryAdded"])

    on_entry_added = wait_for_event("log.entryAdded")
    await create_log(bidi_session, new_tab, log_type, "some text")
    await on_entry_added

    # Unsubscribe from log events globally
    await bidi_session.session.unsubscribe(events=["log.entryAdded"])

    # Track all received log.entryAdded events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    expected_text_0 = await create_log(bidi_session, new_tab, log_type, "text_0")

    # Wait for some time before checking the events array
    await asyncio.sleep(0.5)
    assert len(events) == 0

    # Refresh to create a new context
    await bidi_session.browsing_context.navigate(
        context=new_tab["context"], url=new_tab["url"], wait="complete"
    )

    # Check we still don't receive ConsoleLogEntry events from the new context
    expected_text_1 = await create_log(bidi_session, new_tab, log_type, "text_1")

    # Wait for some time before checking the events array
    await asyncio.sleep(0.5)
    assert len(events) == 0

    # Refresh to create a new context. Note that we refresh to avoid getting
    # cached events from the log event buffer.
    await bidi_session.browsing_context.navigate(
        context=new_tab["context"], url=new_tab["url"], wait="complete"
    )

    # Check that if we subscribe again, we can receive events
    await bidi_session.session.subscribe(events=["log.entryAdded"])

    # Check buffered events are emitted: the two entries created while
    # unsubscribed are expected to be delivered by the time subscribe resolves.
    assert len(events) == 2

    on_entry_added = wait_for_event("log.entryAdded")
    expected_text_2 = await create_log(bidi_session, new_tab, log_type, "text_2")
    await on_entry_added

    # Buffered events come first, in creation order, followed by the new one.
    assert len(events) == 3
    assert_base_entry(events[0], text=expected_text_0, context=new_tab["context"])
    assert_base_entry(events[1], text=expected_text_1, context=new_tab["context"])
    assert_base_entry(events[2], text=expected_text_2, context=new_tab["context"])

    # Check that we also get events from a new context
    new_context = await bidi_session.browsing_context.create(type_hint="tab")

    on_entry_added = wait_for_event("log.entryAdded")
    expected_text_3 = await create_log(bidi_session, new_context, log_type, "text_3")
    await on_entry_added

    assert len(events) == 4
    assert_base_entry(events[3], text=expected_text_3, context=new_context["context"])

    remove_listener()
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/__init__.py
new file mode 100644
index 0000000000..d0451d0f28
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/__init__.py
@@ -0,0 +1,230 @@
+from .. import (
+ any_bool,
+ any_dict,
+ any_int,
+ any_int_or_null,
+ any_list,
+ any_string,
+ any_string_or_null,
+ recursive_compare,
+)
+
+
def assert_cookies(request_cookies, expected_cookies):
    """Assert that each expected cookie matches a cookie of the request.

    :param request_cookies: List of cookie dicts attached to the request.
    :param expected_cookies: List of (partial) cookie dicts; only the keys
        present in each expected cookie are compared.
    """
    assert len(request_cookies) == len(expected_cookies)

    # Simple helper to match a cookie against the keys of an expected cookie.
    def match_cookie(cookie, expected):
        # Use get() so a missing key counts as a mismatch instead of raising
        # KeyError.
        return all(cookie.get(key) == expected[key] for key in expected)

    for expected in expected_cookies:
        # any() fails with a clean AssertionError when no cookie matches,
        # unlike next() which would raise StopIteration.
        assert any(match_cookie(cookie, expected) for cookie in request_cookies)
+
+
def assert_headers(request_headers, expected_headers):
    """Assert that the expected headers are included in the request headers.

    The browser adds its own request headers, so request_headers is only
    required to be a superset of expected_headers.
    """
    assert len(request_headers) >= len(expected_headers)
    for header in expected_headers:
        # Membership relies on dict equality and fails with a clean
        # AssertionError, unlike next() which would raise StopIteration when
        # no header matches.
        assert header in request_headers
+
+
def assert_timing_info(timing_info):
    """Assert that every timing field of a network request is an int."""
    timing_keys = (
        "requestTime",
        "redirectStart",
        "redirectEnd",
        "fetchStart",
        "dnsStart",
        "dnsEnd",
        "connectStart",
        "connectEnd",
        "tlsStart",
        "tlsEnd",
        "requestStart",
        "responseStart",
        "responseEnd",
    )
    recursive_compare({key: any_int for key in timing_keys}, timing_info)
+
+
def assert_request_data(request_data, expected_request):
    """Assert the RequestData of a network event.

    :param request_data: The "request" payload of a network event.
    :param expected_request: Dict of expected request properties; "cookies"
        and "headers" entries are asserted with their dedicated helpers, all
        other entries via recursive_compare.
    """
    recursive_compare(
        {
            "bodySize": any_int_or_null,
            "cookies": any_list,
            "headers": any_list,
            "headersSize": any_int,
            "method": any_string,
            "request": any_string,
            "timings": any_dict,
            "url": any_string,
        },
        request_data,
    )

    assert_timing_info(request_data["timings"])

    # Work on a shallow copy so the caller's dict is not mutated when the
    # "cookies"/"headers" keys are removed below (the original implementation
    # deleted keys from the caller's dict as a side effect).
    expected_request = dict(expected_request)

    if "cookies" in expected_request:
        assert_cookies(request_data["cookies"], expected_request["cookies"])
        # While recursive_compare tolerates missing entries in dict, arrays
        # need to have the exact same number of items, and be in the same order.
        # We don't want to assert all headers and cookies, so we do a custom
        # assert for each and then delete it before using recursive_compare.
        del expected_request["cookies"]

    if "headers" in expected_request:
        assert_headers(request_data["headers"], expected_request["headers"])
        # Remove headers before using recursive_compare, see comment for cookies
        del expected_request["headers"]

    recursive_compare(expected_request, request_data)
+
+
def assert_base_parameters(
    event,
    context=None,
    is_redirect=None,
    redirect_count=None,
    expected_request=None,
):
    """Assert the base parameters shared by all network events."""
    recursive_compare(
        {
            "context": any_string_or_null,
            "navigation": any_string_or_null,
            "isRedirect": any_bool,
            "redirectCount": any_int,
            "request": any_dict,
            "timestamp": any_int,
        },
        event,
    )

    # Only check the optional values the caller actually provided.
    optional_checks = (
        ("context", context),
        ("isRedirect", is_redirect),
        ("redirectCount", redirect_count),
    )
    for key, expected in optional_checks:
        if expected is not None:
            assert event[key] == expected

    # Assert request data
    if expected_request is not None:
        assert_request_data(event["request"], expected_request)
+
+
def assert_before_request_sent_event(
    event,
    context=None,
    is_redirect=None,
    redirect_count=None,
    expected_request=None,
):
    """Assert a network.beforeRequestSent event."""
    # The initiator is specific to beforeRequestSent events.
    initiator = event["initiator"]
    assert isinstance(initiator, dict)
    assert isinstance(initiator["type"], str)

    # Everything else is shared with the other network events.
    assert_base_parameters(
        event,
        context=context,
        is_redirect=is_redirect,
        redirect_count=redirect_count,
        expected_request=expected_request,
    )
+
+
def assert_response_data(response_data, expected_response):
    """Assert the ResponseData of a network event.

    :param response_data: The "response" payload of a network event.
    :param expected_response: Dict of expected response properties; a
        "headers" entry is asserted with assert_headers, all other entries
        via recursive_compare.
    """
    recursive_compare(
        {
            "bodySize": any_int_or_null,
            "bytesReceived": any_int,
            "content": {
                "size": any_int_or_null,
            },
            "fromCache": any_bool,
            "headersSize": any_int_or_null,
            "protocol": any_string,
            "status": any_int,
            "statusText": any_string,
            "url": any_string,
        },
        response_data,
    )

    # Work on a shallow copy so the caller's dict is not mutated when the
    # "headers" key is removed below (the original implementation deleted the
    # key from the caller's dict as a side effect).
    expected_response = dict(expected_response)

    if "headers" in expected_response:
        assert_headers(response_data["headers"], expected_response["headers"])
        # Remove headers before using recursive_compare, see comment for cookies
        # in assert_request_data
        del expected_response["headers"]

    recursive_compare(expected_response, response_data)
+
+
def assert_response_event(
    event,
    context=None,
    is_redirect=None,
    redirect_count=None,
    expected_request=None,
    expected_response=None,
):
    """Assert a network.responseStarted or network.responseCompleted event."""
    # The response payload is specific to response events.
    response = event["response"]
    any_dict(response)
    if expected_response is not None:
        assert_response_data(response, expected_response)

    # Everything else is shared with the other network events.
    assert_base_parameters(
        event,
        context=context,
        is_redirect=is_redirect,
        redirect_count=redirect_count,
        expected_request=expected_request,
    )
+
# List of (status, status text) tuples covering the HTTP status codes expected
# to be reported in network events; used to parametrize the response tests.
HTTP_STATUS_AND_STATUS_TEXT = [
    (101, "Switching Protocols"),
    (200, "OK"),
    (201, "Created"),
    (202, "Accepted"),
    (203, "Non-Authoritative Information"),
    (204, "No Content"),
    (205, "Reset Content"),
    (206, "Partial Content"),
    (300, "Multiple Choices"),
    (301, "Moved Permanently"),
    (302, "Found"),
    (303, "See Other"),
    (305, "Use Proxy"),
    (307, "Temporary Redirect"),
    (400, "Bad Request"),
    (401, "Unauthorized"),
    (402, "Payment Required"),
    (403, "Forbidden"),
    (404, "Not Found"),
    (405, "Method Not Allowed"),
    (406, "Not Acceptable"),
    (407, "Proxy Authentication Required"),
    (408, "Request Timeout"),
    (409, "Conflict"),
    (410, "Gone"),
    (411, "Length Required"),
    (412, "Precondition Failed"),
    (415, "Unsupported Media Type"),
    (417, "Expectation Failed"),
    (500, "Internal Server Error"),
    (501, "Not Implemented"),
    (502, "Bad Gateway"),
    (503, "Service Unavailable"),
    (504, "Gateway Timeout"),
    (505, "HTTP Version Not Supported"),
]
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/before_request_sent_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/before_request_sent_tentative.py
new file mode 100644
index 0000000000..1711f5177f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/before_request_sent/before_request_sent_tentative.py
@@ -0,0 +1,295 @@
+import asyncio
+
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+from tests.support.sync import AsyncPoll
+
+from .. import assert_before_request_sent_event
+
+PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html"
+PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt"
+PAGE_REDIRECT_HTTP_EQUIV = (
+ "/webdriver/tests/bidi/network/support/redirect_http_equiv.html"
+)
+PAGE_REDIRECTED_HTML = "/webdriver/tests/bidi/network/support/redirected.html"
+
+# The following tests are marked as tentative until
+# https://github.com/w3c/webdriver-bidi/pull/204 is merged.
+
+
@pytest.mark.asyncio
async def test_subscribe_status(bidi_session, top_context, wait_for_event, url, fetch):
    """Events are only emitted while subscribed to network.beforeRequestSent."""
    await bidi_session.session.subscribe(events=["network.beforeRequestSent"])

    # Load a page before calling fetch() from it.
    await bidi_session.browsing_context.navigate(
        context=top_context["context"],
        url=url(PAGE_EMPTY_HTML),
        wait="complete",
    )

    # Track all received network.beforeRequestSent events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener(
        "network.beforeRequestSent", on_event
    )

    text_url = url(PAGE_EMPTY_TEXT)
    on_before_request_sent = wait_for_event("network.beforeRequestSent")
    await fetch(text_url)
    await on_before_request_sent

    assert len(events) == 1
    expected_request = {"method": "GET", "url": text_url}
    assert_before_request_sent_event(
        events[0],
        expected_request=expected_request,
        redirect_count=0,
        is_redirect=False,
    )

    await bidi_session.session.unsubscribe(events=["network.beforeRequestSent"])

    # Fetch the text url again, with an additional parameter to bypass the cache
    # and check no new event is received.
    await fetch(f"{text_url}?nocache")
    await asyncio.sleep(0.5)
    assert len(events) == 1

    remove_listener()
+
+
@pytest.mark.asyncio
async def test_load_page_twice(
    bidi_session, top_context, wait_for_event, url, fetch, setup_network_test
):
    """A navigation emits exactly one beforeRequestSent event for the page."""
    html_url = url(PAGE_EMPTY_HTML)

    events = (await setup_network_test(events=["network.beforeRequestSent"]))[
        "network.beforeRequestSent"
    ]

    on_before_request_sent = wait_for_event("network.beforeRequestSent")
    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=html_url, wait="complete"
    )
    await on_before_request_sent

    assert len(events) == 1
    assert_before_request_sent_event(
        events[0],
        expected_request={"method": "GET", "url": html_url},
        redirect_count=0,
        is_redirect=False,
    )
+
+
@pytest.mark.parametrize(
    "method",
    ["GET", "HEAD", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"],
)
@pytest.mark.asyncio
async def test_request_method(
    bidi_session, wait_for_event, url, fetch, setup_network_test, method
):
    """The HTTP method used by fetch is reported in beforeRequestSent."""
    text_url = url(PAGE_EMPTY_TEXT)

    events = (await setup_network_test(events=["network.beforeRequestSent"]))[
        "network.beforeRequestSent"
    ]

    on_before_request_sent = wait_for_event("network.beforeRequestSent")
    await fetch(text_url, method=method)
    await on_before_request_sent

    assert len(events) == 1
    assert_before_request_sent_event(
        events[0],
        expected_request={"method": method, "url": text_url},
        redirect_count=0,
        is_redirect=False,
    )
+
+
@pytest.mark.asyncio
async def test_request_headers(
    bidi_session, wait_for_event, url, fetch, setup_network_test
):
    """Custom fetch headers are reported in beforeRequestSent."""
    text_url = url(PAGE_EMPTY_TEXT)

    events = (await setup_network_test(events=["network.beforeRequestSent"]))[
        "network.beforeRequestSent"
    ]

    on_before_request_sent = wait_for_event("network.beforeRequestSent")
    await fetch(text_url, method="GET", headers={"foo": "bar"})
    await on_before_request_sent

    assert len(events) == 1
    assert_before_request_sent_event(
        events[0],
        expected_request={
            "headers": ({"name": "foo", "value": "bar"},),
            "method": "GET",
            "url": text_url,
        },
        redirect_count=0,
        is_redirect=False,
    )
+
+
@pytest.mark.asyncio
async def test_request_cookies(
    bidi_session, top_context, wait_for_event, url, fetch, setup_network_test
):
    """Cookies set via document.cookie are attached to subsequent requests and
    reported in beforeRequestSent."""
    text_url = url(PAGE_EMPTY_TEXT)


    network_events = await setup_network_test(events=["network.beforeRequestSent"])
    events = network_events["network.beforeRequestSent"]

    # Set a first cookie on the page before fetching.
    await bidi_session.script.evaluate(
        expression="document.cookie = 'foo=bar';",
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    on_before_request_sent = wait_for_event("network.beforeRequestSent")
    await fetch(text_url, method="GET")
    await on_before_request_sent

    assert len(events) == 1
    expected_request = {
        "cookies": ({"name": "foo", "value": "bar"},),
        "method": "GET",
        "url": text_url,
    }
    assert_before_request_sent_event(
        events[0],
        expected_request=expected_request,
        redirect_count=0,
        is_redirect=False,
    )

    # Add a second cookie; the next request should carry both.
    await bidi_session.script.evaluate(
        expression="document.cookie = 'fuu=baz';",
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    on_before_request_sent = wait_for_event("network.beforeRequestSent")
    await fetch(text_url, method="GET")
    await on_before_request_sent

    assert len(events) == 2

    # Both cookies are expected; assert_cookies matches them irrespective of
    # their order in the event payload.
    expected_request = {
        "cookies": (
            {"name": "foo", "value": "bar"},
            {"name": "fuu", "value": "baz"},
        ),
        "method": "GET",
        "url": text_url,
    }
    assert_before_request_sent_event(
        events[1],
        expected_request=expected_request,
        redirect_count=0,
        is_redirect=False,
    )
+
+
@pytest.mark.asyncio
async def test_redirect(bidi_session, wait_for_event, url, fetch, setup_network_test):
    """An HTTP redirect emits two beforeRequestSent events with one request id."""
    text_url = url(PAGE_EMPTY_TEXT)
    redirect_url = url(
        f"/webdriver/tests/support/http_handlers/redirect.py?location={text_url}"
    )

    events = (await setup_network_test(events=["network.beforeRequestSent"]))[
        "network.beforeRequestSent"
    ]

    await fetch(redirect_url, method="GET")

    # Wait until we receive two events, one for the initial request and one for
    # the redirection.
    wait = AsyncPoll(bidi_session, timeout=2)
    await wait.until(lambda _: len(events) >= 2)
    assert len(events) == 2

    # First event: the original request, not yet a redirect.
    assert_before_request_sent_event(
        events[0],
        expected_request={"method": "GET", "url": redirect_url},
        redirect_count=0,
        is_redirect=False,
    )
    # Second event: the redirected request.
    assert_before_request_sent_event(
        events[1],
        expected_request={"method": "GET", "url": text_url},
        redirect_count=1,
        is_redirect=True,
    )

    # Check that both requests share the same requestId
    assert events[0]["request"]["request"] == events[1]["request"]["request"]
+
+
@pytest.mark.asyncio
async def test_redirect_http_equiv(
    bidi_session, top_context, wait_for_event, url, setup_network_test
):
    """A meta http-equiv refresh navigation must not be reported as a redirect:
    both requests get redirect_count 0 and distinct request ids."""
    # PAGE_REDIRECT_HTTP_EQUIV should redirect to PAGE_REDIRECTED_HTML immediately
    http_equiv_url = url(PAGE_REDIRECT_HTTP_EQUIV)
    redirected_url = url(PAGE_REDIRECTED_HTML)


    network_events = await setup_network_test(events=["network.beforeRequestSent"])
    events = network_events["network.beforeRequestSent"]

    await bidi_session.browsing_context.navigate(
        context=top_context["context"],
        url=http_equiv_url,
        wait="complete",
    )

    # Wait until we receive two events, one for the initial request and one for
    # the http-equiv "redirect".
    wait = AsyncPoll(bidi_session, timeout=2)
    await wait.until(lambda _: len(events) >= 2)

    assert len(events) == 2
    expected_request = {"method": "GET", "url": http_equiv_url}
    assert_before_request_sent_event(
        events[0],
        expected_request=expected_request,
        redirect_count=0,
        is_redirect=False,
    )
    # http-equiv redirect should not be considered as a redirect: redirect_count
    # should be 0 and is_redirect should be false.
    expected_request = {"method": "GET", "url": redirected_url}
    assert_before_request_sent_event(
        events[1],
        expected_request=expected_request,
        redirect_count=0,
        is_redirect=False,
    )

    # Check that the http-equiv redirect request has a different requestId
    assert events[0]["request"]["request"] != events[1]["request"]["request"]
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/combined/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/combined/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/combined/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/combined/network_events_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/network/combined/network_events_tentative.py
new file mode 100644
index 0000000000..2defc6c205
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/combined/network_events_tentative.py
@@ -0,0 +1,124 @@
+import asyncio
+
+import pytest
+
+from .. import (
+ assert_before_request_sent_event,
+ assert_response_event,
+)
+
+PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html"
+PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt"
+
+# The following tests are marked as tentative until
+# https://github.com/w3c/webdriver-bidi/pull/204 is merged.
+
+
@pytest.mark.asyncio
async def test_same_request_id(
    bidi_session, top_context, wait_for_event, url, setup_network_test, fetch
):
    """All three network events for a single fetch share the same request id."""
    event_names = [
        "network.beforeRequestSent",
        "network.responseStarted",
        "network.responseCompleted",
    ]
    network_events = await setup_network_test(events=event_names)

    text_url = url(PAGE_EMPTY_TEXT)
    on_response_completed = wait_for_event("network.responseCompleted")
    await fetch(text_url)
    await on_response_completed

    # Exactly one event of each kind should have been collected.
    for name in event_names:
        assert len(network_events[name]) == 1

    before_request_sent = network_events["network.beforeRequestSent"][0]
    response_started = network_events["network.responseStarted"][0]
    response_completed = network_events["network.responseCompleted"][0]

    expected_request = {"method": "GET", "url": text_url}
    expected_response = {"url": text_url}
    assert_before_request_sent_event(
        before_request_sent, expected_request=expected_request
    )
    assert_response_event(
        response_started,
        expected_request=expected_request,
        expected_response=expected_response,
    )
    assert_response_event(
        response_completed,
        expected_request=expected_request,
        expected_response=expected_response,
    )

    # The same request id must be reported in all three events.
    request_id = before_request_sent["request"]["request"]
    assert response_started["request"]["request"] == request_id
    assert response_completed["request"]["request"] == request_id
+
+
@pytest.mark.asyncio
async def test_subscribe_to_one_context(
    bidi_session, top_context, wait_for_event, url, fetch, setup_network_test
):
    """Network events subscribed for a single context must not be emitted for
    requests made from other contexts."""
    other_context = await bidi_session.browsing_context.create(type_hint="tab")
    await bidi_session.browsing_context.navigate(
        context=other_context["context"],
        url=url(PAGE_EMPTY_HTML),
        wait="complete",
    )

    # Subscribe to all three network events, but only for top_context.
    network_events = await setup_network_test(
        events=[
            "network.beforeRequestSent",
            "network.responseStarted",
            "network.responseCompleted",
        ],
        contexts=[top_context["context"]],
    )

    # Perform a fetch request in the subscribed context and wait for the response completed event.
    text_url = url(PAGE_EMPTY_TEXT)
    on_response_completed = wait_for_event("network.responseCompleted")
    await fetch(text_url, context=top_context)
    await on_response_completed

    assert len(network_events["network.beforeRequestSent"]) == 1
    assert len(network_events["network.responseStarted"]) == 1
    assert len(network_events["network.responseCompleted"]) == 1

    # Check the received events have the correct context.
    expected_request = {"method": "GET", "url": text_url}
    expected_response = {"url": text_url}
    assert_before_request_sent_event(
        network_events["network.beforeRequestSent"][0],
        expected_request=expected_request,
        context=top_context["context"],
    )
    assert_response_event(
        network_events["network.responseStarted"][0],
        expected_response=expected_response,
        context=top_context["context"],
    )
    assert_response_event(
        network_events["network.responseCompleted"][0],
        expected_response=expected_response,
        context=top_context["context"],
    )

    # Perform another fetch request in the other context.
    await fetch(text_url, context=other_context)
    await asyncio.sleep(0.5)

    # Check that no other event was received.
    assert len(network_events["network.beforeRequestSent"]) == 1
    assert len(network_events["network.responseStarted"]) == 1
    assert len(network_events["network.responseCompleted"]) == 1
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/conftest.py b/testing/web-platform/tests/webdriver/tests/bidi/network/conftest.py
new file mode 100644
index 0000000000..3dd80f2896
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/conftest.py
@@ -0,0 +1,90 @@
+import json
+
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+RESPONSE_COMPLETED_EVENT = "network.responseCompleted"
+
+PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html"
+
+
@pytest.fixture
def fetch(bidi_session, top_context):
    """Perform a fetch from the page of the provided context, default to the
    top context.
    """

    async def fetch(url, method="GET", headers=None, context=top_context):
        """Run fetch(url) in `context` and wait for the response body.

        :param url: URL to fetch.
        :param method: HTTP method for the request.
        :param headers: Optional dict of request headers.
        :param context: Browsing context info dict to run the fetch in.
        """
        method_arg = f"method: '{method}',"

        headers_arg = ""
        # PEP 8: compare against None with an identity check, not `!= None`.
        if headers is not None:
            headers_arg = f"headers: {json.dumps(headers)},"

        # Wait for fetch() to resolve a response and for response.text() to
        # resolve as well to make sure the request/response is completed when
        # the helper returns.
        await bidi_session.script.evaluate(
            expression=f"""
                 fetch("{url}", {{
                   {method_arg}
                   {headers_arg}
                 }}).then(response => response.text());""",
            target=ContextTarget(context["context"]),
            await_promise=True,
        )

    return fetch
+
+
@pytest.fixture
async def setup_network_test(
    bidi_session, subscribe_events, wait_for_event, top_context, url
):
    """Navigate the current top level context to the provided url and subscribe
    to the provided network events.

    Returns an `events` dictionary in which the captured network events will be added.
    The keys of the dictionary are network event names (eg. "network.beforeRequestSent"),
    and the value is an array of collected events.
    """
    # Listeners registered by _setup_network_test; removed on fixture teardown.
    listeners = []

    async def _setup_network_test(events, test_url=url(PAGE_EMPTY_HTML), contexts=None):
        nonlocal listeners

        # Listen for network.responseCompleted for the initial navigation to
        # make sure this event will not be captured unexpectedly by the tests.
        await bidi_session.session.subscribe(
            events=[RESPONSE_COMPLETED_EVENT], contexts=[top_context["context"]]
        )
        on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT)

        await bidi_session.browsing_context.navigate(
            context=top_context["context"],
            url=test_url,
            wait="complete",
        )
        await on_response_completed
        await bidi_session.session.unsubscribe(
            events=[RESPONSE_COMPLETED_EVENT], contexts=[top_context["context"]]
        )

        # Subscribe to the events requested by the test, optionally scoped to
        # specific contexts.
        await subscribe_events(events, contexts)

        network_events = {}
        for event in events:
            network_events[event] = []

            # Bind `event` as a default argument so each listener appends to
            # the list for its own event name (avoids the late-binding closure
            # pitfall).
            async def on_event(method, data, event=event):
                network_events[event].append(data)

            listeners.append(bidi_session.add_event_listener(event, on_event))

        return network_events

    yield _setup_network_test

    # cleanup
    for remove_listener in listeners:
        remove_listener()
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed_tentative.py
new file mode 100644
index 0000000000..347f970c61
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/response_completed/response_completed_tentative.py
@@ -0,0 +1,274 @@
+import asyncio
+import json
+
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+from tests.support.sync import AsyncPoll
+
+from ... import any_int
+from .. import assert_response_event, HTTP_STATUS_AND_STATUS_TEXT
+
+PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html"
+PAGE_EMPTY_IMAGE = "/webdriver/tests/bidi/network/support/empty.png"
+PAGE_EMPTY_SCRIPT = "/webdriver/tests/bidi/network/support/empty.js"
+PAGE_EMPTY_SVG = "/webdriver/tests/bidi/network/support/empty.svg"
+PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt"
+
+RESPONSE_COMPLETED_EVENT = "network.responseCompleted"
+
+# The following tests are marked as tentative until
+# https://github.com/w3c/webdriver-bidi/pull/204 is merged.
+
+
@pytest.mark.asyncio
async def test_subscribe_status(bidi_session, top_context, wait_for_event, url, fetch):
    """Events are only emitted while subscribed to network.responseCompleted."""
    await bidi_session.session.subscribe(events=[RESPONSE_COMPLETED_EVENT])

    # Track all received network.responseCompleted events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener(
        RESPONSE_COMPLETED_EVENT, on_event
    )

    # The navigation itself triggers a responseCompleted event for the page.
    html_url = url(PAGE_EMPTY_HTML)
    on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT)
    await bidi_session.browsing_context.navigate(
        context=top_context["context"],
        url=html_url,
        wait="complete",
    )
    await on_response_completed

    assert len(events) == 1
    expected_request = {"method": "GET", "url": html_url}
    expected_response = {
        "url": url(PAGE_EMPTY_HTML),
        "fromCache": False,
        "mimeType": "text/html",
        "status": 200,
        "statusText": "OK",
    }
    assert_response_event(
        events[0],
        expected_request=expected_request,
        expected_response=expected_response,
        redirect_count=0,
        is_redirect=False,
    )

    # A fetch from the page also triggers a responseCompleted event.
    text_url = url(PAGE_EMPTY_TEXT)
    on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT)
    await fetch(text_url)
    await on_response_completed

    assert len(events) == 2
    expected_request = {"method": "GET", "url": text_url}
    expected_response = {
        "url": text_url,
        "fromCache": False,
        "mimeType": "text/plain",
        "status": 200,
        "statusText": "OK",
    }
    assert_response_event(
        events[1],
        expected_request=expected_request,
        expected_response=expected_response,
        redirect_count=0,
        is_redirect=False,
    )

    await bidi_session.session.unsubscribe(events=[RESPONSE_COMPLETED_EVENT])

    # Fetch the text url again, with an additional parameter to bypass the cache
    # and check no new event is received.
    await fetch(f"{text_url}?nocache")
    await asyncio.sleep(0.5)
    assert len(events) == 2

    remove_listener()
+
+
@pytest.mark.asyncio
async def test_load_page_twice(
    bidi_session, top_context, wait_for_event, url, fetch, setup_network_test
):
    """A navigation emits exactly one responseCompleted event for the page."""
    html_url = url(PAGE_EMPTY_HTML)

    events = (await setup_network_test(events=[RESPONSE_COMPLETED_EVENT]))[
        RESPONSE_COMPLETED_EVENT
    ]

    on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT)
    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=html_url, wait="complete"
    )
    await on_response_completed

    assert len(events) == 1
    assert_response_event(
        events[0],
        expected_request={"method": "GET", "url": html_url},
        expected_response={
            "url": html_url,
            "fromCache": False,
            "mimeType": "text/html",
            "status": 200,
            "statusText": "OK",
            "protocol": "http/1.1",
        },
        redirect_count=0,
        is_redirect=False,
    )
+
+
@pytest.mark.parametrize(
    "status, status_text",
    HTTP_STATUS_AND_STATUS_TEXT,
)
@pytest.mark.asyncio
async def test_response_status(
    bidi_session, wait_for_event, url, fetch, setup_network_test, status, status_text
):
    """The response status and statusText are reported in responseCompleted."""
    # The nocache parameter makes the URL unique per event name so responses
    # are not served from the cache across test modules.
    status_url = url(
        f"/webdriver/tests/support/http_handlers/status.py?status={status}&nocache={RESPONSE_COMPLETED_EVENT}"
    )

    network_events = await setup_network_test(events=[RESPONSE_COMPLETED_EVENT])
    events = network_events[RESPONSE_COMPLETED_EVENT]

    on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT)
    await fetch(status_url)
    await on_response_completed

    assert len(events) == 1
    expected_request = {"method": "GET", "url": status_url}
    expected_response = {
        "url": status_url,
        "fromCache": False,
        "mimeType": "text/plain",
        "status": status,
        "statusText": status_text,
        "protocol": "http/1.1",
    }
    # Bug fix: expected_request was previously built but never passed to
    # assert_response_event, so the request data was never asserted.
    assert_response_event(
        events[0],
        expected_request=expected_request,
        expected_response=expected_response,
        redirect_count=0,
        is_redirect=False,
    )
+
+
@pytest.mark.asyncio
async def test_response_headers(
    bidi_session, wait_for_event, url, fetch, setup_network_test
):
    """Headers set by the server are reported in responseCompleted."""
    headers_url = url(
        "/webdriver/tests/support/http_handlers/headers.py?header=foo:bar&header=baz:biz"
    )

    network_events = await setup_network_test(events=[RESPONSE_COMPLETED_EVENT])
    events = network_events[RESPONSE_COMPLETED_EVENT]

    on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT)
    await fetch(headers_url, method="GET")
    await on_response_completed

    assert len(events) == 1

    expected_request = {"method": "GET", "url": headers_url}
    expected_response = {
        "url": headers_url,
        "fromCache": False,
        "mimeType": "text/plain",
        "status": 200,
        "statusText": "OK",
        "headers": (
            {"name": "foo", "value": "bar"},
            {"name": "baz", "value": "biz"},
        ),
        "protocol": "http/1.1",
    }
    # Bug fix: expected_response was previously built but never passed to
    # assert_response_event, so the response headers this test is about were
    # never asserted.
    assert_response_event(
        events[0],
        expected_request=expected_request,
        expected_response=expected_response,
        redirect_count=0,
        is_redirect=False,
    )
+
+
@pytest.mark.parametrize(
    "page_url, mime_type",
    [
        (PAGE_EMPTY_HTML, "text/html"),
        (PAGE_EMPTY_TEXT, "text/plain"),
        (PAGE_EMPTY_SCRIPT, "text/javascript"),
        (PAGE_EMPTY_IMAGE, "image/png"),
        (PAGE_EMPTY_SVG, "image/svg+xml"),
    ],
)
@pytest.mark.asyncio
async def test_response_mime_type_file(
    bidi_session, url, wait_for_event, fetch, setup_network_test, page_url, mime_type
):
    """The response mimeType matches the type of the fetched resource."""
    events = (await setup_network_test(events=[RESPONSE_COMPLETED_EVENT]))[
        RESPONSE_COMPLETED_EVENT
    ]

    page = url(page_url)
    on_response_completed = wait_for_event(RESPONSE_COMPLETED_EVENT)
    await fetch(page, method="GET")
    await on_response_completed

    assert len(events) == 1
    assert_response_event(
        events[0],
        expected_request={"method": "GET", "url": page},
        expected_response={"url": page, "mimeType": mime_type},
        redirect_count=0,
        is_redirect=False,
    )
+
+
@pytest.mark.asyncio
async def test_redirect(bidi_session, wait_for_event, url, fetch, setup_network_test):
    """An HTTP redirect emits two responseCompleted events with one request id."""
    text_url = url(PAGE_EMPTY_TEXT)
    redirect_url = url(
        f"/webdriver/tests/support/http_handlers/redirect.py?location={text_url}"
    )

    events = (await setup_network_test(events=[RESPONSE_COMPLETED_EVENT]))[
        RESPONSE_COMPLETED_EVENT
    ]

    await fetch(redirect_url, method="GET")

    # Wait until we receive two events, one for the initial request and one for
    # the redirection.
    wait = AsyncPoll(bidi_session, timeout=2)
    await wait.until(lambda _: len(events) >= 2)
    assert len(events) == 2

    # First event: the original request; second event: the redirected request.
    assert_response_event(
        events[0],
        expected_request={"method": "GET", "url": redirect_url},
        redirect_count=0,
        is_redirect=False,
    )
    assert_response_event(
        events[1],
        expected_request={"method": "GET", "url": text_url},
        redirect_count=1,
        is_redirect=True,
    )

    # Check that both requests share the same requestId
    assert events[0]["request"]["request"] == events[1]["request"]["request"]
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started_tentative.py
new file mode 100644
index 0000000000..18c78c6922
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/response_started/response_started_tentative.py
@@ -0,0 +1,250 @@
+import asyncio
+import json
+
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+from tests.support.sync import AsyncPoll
+
+from ... import any_int
+from .. import assert_response_event, HTTP_STATUS_AND_STATUS_TEXT
+
+PAGE_EMPTY_HTML = "/webdriver/tests/bidi/network/support/empty.html"
+PAGE_EMPTY_IMAGE = "/webdriver/tests/bidi/network/support/empty.png"
+PAGE_EMPTY_SCRIPT = "/webdriver/tests/bidi/network/support/empty.js"
+PAGE_EMPTY_SVG = "/webdriver/tests/bidi/network/support/empty.svg"
+PAGE_EMPTY_TEXT = "/webdriver/tests/bidi/network/support/empty.txt"
+
+RESPONSE_STARTED_EVENT = "network.responseStarted"
+
+# The following tests are marked as tentative until
+# https://github.com/w3c/webdriver-bidi/pull/204 is merged.
+
+
+@pytest.mark.asyncio
+async def test_subscribe_status(bidi_session, top_context, wait_for_event, url, fetch):
+ await bidi_session.session.subscribe(events=[RESPONSE_STARTED_EVENT])
+
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"],
+ url=url(PAGE_EMPTY_HTML),
+ wait="complete",
+ )
+
+ # Track all received network.responseStarted events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener(
+ RESPONSE_STARTED_EVENT, on_event
+ )
+
+ text_url = url(PAGE_EMPTY_TEXT)
+ on_response_started = wait_for_event(RESPONSE_STARTED_EVENT)
+ await fetch(text_url)
+ await on_response_started
+
+ assert len(events) == 1
+ expected_request = {"method": "GET", "url": text_url}
+ expected_response = {
+ "url": text_url,
+ "fromCache": False,
+ "mimeType": "text/plain",
+ "status": 200,
+ "statusText": "OK",
+ }
+ assert_response_event(
+ events[0],
+ expected_request=expected_request,
+ expected_response=expected_response,
+ redirect_count=0,
+ is_redirect=False,
+ )
+
+ await bidi_session.session.unsubscribe(events=[RESPONSE_STARTED_EVENT])
+
+ # Fetch the text url again, with an additional parameter to bypass the cache
+ # and check no new event is received.
+ await fetch(f"{text_url}?nocache")
+ await asyncio.sleep(0.5)
+ assert len(events) == 1
+
+ remove_listener()
+
+
+@pytest.mark.asyncio
+async def test_load_page_twice(
+ bidi_session, top_context, wait_for_event, url, fetch, setup_network_test
+):
+ html_url = url(PAGE_EMPTY_HTML)
+
+ network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT])
+ events = network_events[RESPONSE_STARTED_EVENT]
+
+ on_response_started = wait_for_event(RESPONSE_STARTED_EVENT)
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"],
+ url=html_url,
+ wait="complete",
+ )
+ await on_response_started
+
+ assert len(events) == 1
+ expected_request = {"method": "GET", "url": html_url}
+ expected_response = {
+ "url": html_url,
+ "fromCache": False,
+ "mimeType": "text/html",
+ "status": 200,
+ "statusText": "OK",
+ "protocol": "http/1.1",
+ }
+ assert_response_event(
+ events[0],
+ expected_request=expected_request,
+ expected_response=expected_response,
+ redirect_count=0,
+ is_redirect=False,
+ )
+
+
+@pytest.mark.parametrize(
+ "status, status_text",
+ HTTP_STATUS_AND_STATUS_TEXT,
+)
+@pytest.mark.asyncio
+async def test_response_status(
+ bidi_session, wait_for_event, url, fetch, setup_network_test, status, status_text
+):
+ status_url = url(f"/webdriver/tests/support/http_handlers/status.py?status={status}&nocache={RESPONSE_STARTED_EVENT}")
+
+ network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT])
+ events = network_events[RESPONSE_STARTED_EVENT]
+
+ on_response_started = wait_for_event(RESPONSE_STARTED_EVENT)
+ await fetch(status_url)
+ await on_response_started
+
+ assert len(events) == 1
+ expected_request = {"method": "GET", "url": status_url}
+ expected_response = {
+ "url": status_url,
+ "fromCache": False,
+ "mimeType": "text/plain",
+ "status": status,
+ "statusText": status_text,
+ "protocol": "http/1.1",
+ }
+ assert_response_event(
+ events[0],
+        expected_request=expected_request, expected_response=expected_response,
+ redirect_count=0,
+ is_redirect=False,
+ )
+
+
+@pytest.mark.asyncio
+async def test_response_headers(
+ bidi_session, wait_for_event, url, fetch, setup_network_test
+):
+ headers_url = url(
+ "/webdriver/tests/support/http_handlers/headers.py?header=foo:bar&header=baz:biz"
+ )
+
+ network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT])
+ events = network_events[RESPONSE_STARTED_EVENT]
+
+ on_response_started = wait_for_event(RESPONSE_STARTED_EVENT)
+ await fetch(headers_url, method="GET")
+ await on_response_started
+
+ assert len(events) == 1
+
+ expected_request = {"method": "GET", "url": headers_url}
+ expected_response = {
+ "url": headers_url,
+ "fromCache": False,
+ "mimeType": "text/plain",
+ "status": 200,
+ "statusText": "OK",
+ "headers": (
+ {"name": "foo", "value": "bar"},
+ {"name": "baz", "value": "biz"},
+ ),
+ "protocol": "http/1.1",
+ }
+ assert_response_event(
+ events[0],
+        expected_request=expected_request, expected_response=expected_response,
+ redirect_count=0,
+ is_redirect=False,
+ )
+
+
+@pytest.mark.parametrize(
+ "page_url, mime_type",
+ [
+ (PAGE_EMPTY_HTML, "text/html"),
+ (PAGE_EMPTY_TEXT, "text/plain"),
+ (PAGE_EMPTY_SCRIPT, "text/javascript"),
+ (PAGE_EMPTY_IMAGE, "image/png"),
+ (PAGE_EMPTY_SVG, "image/svg+xml"),
+ ],
+)
+@pytest.mark.asyncio
+async def test_response_mime_type_file(
+ bidi_session, url, wait_for_event, fetch, setup_network_test, page_url, mime_type
+):
+ network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT])
+ events = network_events[RESPONSE_STARTED_EVENT]
+
+ on_response_started = wait_for_event(RESPONSE_STARTED_EVENT)
+ await fetch(url(page_url), method="GET")
+ await on_response_started
+
+ assert len(events) == 1
+
+ expected_request = {"method": "GET", "url": url(page_url)}
+ expected_response = {"url": url(page_url), "mimeType": mime_type}
+ assert_response_event(
+ events[0],
+ expected_request=expected_request,
+ expected_response=expected_response,
+ redirect_count=0,
+ is_redirect=False,
+ )
+
+
+@pytest.mark.asyncio
+async def test_redirect(bidi_session, wait_for_event, url, fetch, setup_network_test):
+ text_url = url(PAGE_EMPTY_TEXT)
+ redirect_url = url(f"/webdriver/tests/support/http_handlers/redirect.py?location={text_url}")
+
+ network_events = await setup_network_test(events=[RESPONSE_STARTED_EVENT])
+ events = network_events[RESPONSE_STARTED_EVENT]
+
+ await fetch(redirect_url, method="GET")
+
+ # Wait until we receive two events, one for the initial request and one for
+ # the redirection.
+ wait = AsyncPoll(bidi_session, timeout=2)
+ await wait.until(lambda _: len(events) >= 2)
+
+ assert len(events) == 2
+ expected_request = {"method": "GET", "url": redirect_url}
+ assert_response_event(
+ events[0],
+ expected_request=expected_request,
+ redirect_count=0,
+ is_redirect=False,
+ )
+ expected_request = {"method": "GET", "url": text_url}
+ assert_response_event(
+ events[1], expected_request=expected_request, redirect_count=1, is_redirect=True
+ )
+
+ # Check that both requests share the same requestId
+ assert events[0]["request"]["request"] == events[1]["request"]["request"]
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.html b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.html
new file mode 100644
index 0000000000..69e9da4114
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.html
@@ -0,0 +1,2 @@
+<!DOCTYPE html>
+<html></html>
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.js b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.js
new file mode 100644
index 0000000000..3918c74e44
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.js
@@ -0,0 +1 @@
+"use strict";
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.png b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.png
new file mode 100644
index 0000000000..afb763ce9d
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.png
Binary files differ
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.svg b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.svg
new file mode 100644
index 0000000000..158b3aac16
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"></svg>
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.txt b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.txt
new file mode 100644
index 0000000000..c6cac69265
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/empty.txt
@@ -0,0 +1 @@
+empty
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirect_http_equiv.html b/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirect_http_equiv.html
new file mode 100644
index 0000000000..9b588c67ef
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirect_http_equiv.html
@@ -0,0 +1,4 @@
+<!DOCTYPE html>
+<head>
+ <meta http-equiv="refresh" content="0;redirected.html" />
+</head>
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirected.html b/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirected.html
new file mode 100644
index 0000000000..3732b218cf
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/network/support/redirected.html
@@ -0,0 +1,2 @@
+<!DOCTYPE html>
+<html>redirected</html>
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/__init__.py
new file mode 100644
index 0000000000..51fc26ce56
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/__init__.py
@@ -0,0 +1,50 @@
+from typing import Any, Callable, Mapping
+
+from .. import any_int, any_string, recursive_compare
+
+
+def assert_handle(obj: Mapping[str, Any], should_contain_handle: bool) -> None:
+ if should_contain_handle:
+ assert "handle" in obj, f"Result should contain `handle`. Actual: {obj}"
+ assert isinstance(obj["handle"], str), f"`handle` should be a string, but was {type(obj['handle'])}"
+ else:
+ assert "handle" not in obj, f"Result should not contain `handle`. Actual: {obj}"
+
+
+def specific_error_response(expected_error: Mapping[str, Any]) -> Callable[[Any], None]:
+ return lambda actual: recursive_compare(
+ {
+ "realm": any_string,
+ "exceptionDetails": {
+ "columnNumber": any_int,
+ "exception": expected_error,
+ "lineNumber": any_int,
+ "stackTrace": any_stack_trace,
+ "text": any_string,
+ },
+ },
+ actual)
+
+
+def any_stack_trace(actual: Any) -> None:
+ assert type(actual) is dict
+ assert "callFrames" in actual
+ assert type(actual["callFrames"]) is list
+ for actual_frame in actual["callFrames"]:
+ any_stack_frame(actual_frame)
+
+
+def any_stack_frame(actual: Any) -> None:
+ assert type(actual) is dict
+
+ assert "columnNumber" in actual
+ any_int(actual["columnNumber"])
+
+ assert "functionName" in actual
+ any_string(actual["functionName"])
+
+ assert "lineNumber" in actual
+ any_int(actual["lineNumber"])
+
+ assert "url" in actual
+ any_string(actual["url"])
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/arguments.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/arguments.py
new file mode 100644
index 0000000000..9fbe5fb811
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/arguments.py
@@ -0,0 +1,385 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget
+
+from ... import recursive_compare
+
+
+@pytest.mark.asyncio
+async def test_arguments(bidi_session, top_context):
+ result = await bidi_session.script.call_function(
+ function_declaration="(...args)=>{return args}",
+ arguments=[{
+ "type": "string",
+ "value": "ARGUMENT_STRING_VALUE"
+ }, {
+ "type": "number",
+ "value": 42}],
+ await_promise=False,
+ target=ContextTarget(top_context["context"]))
+
+ recursive_compare({
+ "type": "array",
+ "value": [{
+ "type": 'string',
+ "value": 'ARGUMENT_STRING_VALUE'
+ }, {
+ "type": 'number',
+ "value": 42}]},
+ result)
+
+
+@pytest.mark.asyncio
+async def test_default_arguments(bidi_session, top_context):
+ result = await bidi_session.script.call_function(
+ function_declaration="(...args)=>{return args}",
+ await_promise=False,
+ target=ContextTarget(top_context["context"]))
+
+ recursive_compare({
+ "type": "array",
+ "value": []
+ }, result)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "setup_expression, function_declaration, expected",
+ [
+ (
+ "Symbol('foo')",
+ "(symbol) => symbol.toString()",
+ {"type": "string", "value": "Symbol(foo)"},
+ ),
+ ("[1,2]", "(array) => array[0]", {"type": "number", "value": 1}),
+ (
+ "new RegExp('foo')",
+ "(regexp) => regexp.source",
+ {"type": "string", "value": "foo"},
+ ),
+ (
+ "new Date(1654004849000)",
+ "(date) => date.toISOString()",
+ {"type": "string", "value": "2022-05-31T13:47:29.000Z"},
+ ),
+ (
+ "new Map([['foo', 'bar']])",
+ "(map) => map.get('foo')",
+ {"type": "string", "value": "bar"},
+ ),
+ (
+ "new Set(['foo'])",
+ "(set) => set.has('foo')",
+ {"type": "boolean", "value": True},
+ ),
+ (
+ "{const weakMap = new WeakMap(); weakMap.set(weakMap, 'foo')}",
+ "(weakMap)=> weakMap.get(weakMap)",
+ {"type": "string", "value": "foo"},
+ ),
+ (
+ "{const weakSet = new WeakSet(); weakSet.add(weakSet)}",
+ "(weakSet)=> weakSet.has(weakSet)",
+ {"type": "boolean", "value": True},
+ ),
+ (
+ "new Error('error message')",
+ "(error) => error.message",
+ {"type": "string", "value": "error message"},
+ ),
+ (
+ "new SyntaxError('syntax error message')",
+ "(error) => error.message",
+ {"type": "string", "value": "syntax error message"},
+ ),
+ (
+ "new Promise((resolve) => resolve(3))",
+ "(promise) => promise",
+ {"type": "number", "value": 3},
+ ),
+ (
+ "new Int8Array(2)",
+ "(int8Array) => int8Array.length",
+ {"type": "number", "value": 2},
+ ),
+ (
+ "new ArrayBuffer(8)",
+ "(arrayBuffer) => arrayBuffer.byteLength",
+ {"type": "number", "value": 8},
+ ),
+ ("() => true", "(func) => func()", {"type": "boolean", "value": True}),
+ (
+ "(function() {return false;})",
+ "(func) => func()",
+ {"type": "boolean", "value": False},
+ ),
+ (
+ "document.createElement('div')",
+ "(node) => node.tagName",
+ {"type": "string", "value": "DIV"},
+ ),
+ (
+ "window.foo = 3; window",
+ "(window) => window.foo",
+ {"type": "number", "value": 3},
+ ),
+ (
+ "window.url = new URL('https://example.com'); window.url",
+ "(url) => url.hostname",
+ {"type": "string", "value": "example.com"},
+ ),
+ (
+ "({SOME_PROPERTY:'SOME_VALUE'})",
+ "(obj) => obj.SOME_PROPERTY",
+ {"type": "string", "value": "SOME_VALUE"},
+ ),
+ ],
+)
+async def test_remote_value_argument(
+ bidi_session, top_context, setup_expression, function_declaration, expected
+):
+ remote_value_result = await bidi_session.script.evaluate(
+ expression=setup_expression,
+ await_promise=False,
+ result_ownership="root",
+ target=ContextTarget(top_context["context"]),
+ )
+ remote_value_handle = remote_value_result.get("handle")
+
+ assert isinstance(remote_value_handle, str)
+
+ result = await bidi_session.script.call_function(
+ function_declaration=function_declaration,
+ arguments=[{"handle": remote_value_handle}],
+ await_promise=True if remote_value_result["type"] == "promise" else False,
+ target=ContextTarget(top_context["context"]),
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "argument, expected",
+ [
+ ({"type": "undefined"}, "undefined"),
+ ({"type": "null"}, "null"),
+ ({"type": "string", "value": "foobar"}, "'foobar'"),
+ ({"type": "string", "value": "2"}, "'2'"),
+ ({"type": "number", "value": "-0"}, "-0"),
+ ({"type": "number", "value": "Infinity"}, "Infinity"),
+ ({"type": "number", "value": "-Infinity"}, "-Infinity"),
+ ({"type": "number", "value": 3}, "3"),
+ ({"type": "number", "value": 1.4}, "1.4"),
+ ({"type": "boolean", "value": True}, "true"),
+ ({"type": "boolean", "value": False}, "false"),
+ ({"type": "bigint", "value": "42"}, "42n"),
+ ],
+)
+async def test_primitive_values(bidi_session, top_context, argument, expected):
+ result = await bidi_session.script.call_function(
+ function_declaration=f"""(arg) => {{
+ if(arg!=={expected})
+ throw Error("Argument should be {expected}, but was "+arg);
+ return arg;
+ }}""",
+ arguments=[argument],
+ await_promise=False,
+ target=ContextTarget(top_context["context"]),
+ )
+
+ recursive_compare(argument, result)
+
+
+@pytest.mark.asyncio
+async def test_nan(bidi_session, top_context):
+ nan_remote_value = {"type": "number", "value": "NaN"}
+ result = await bidi_session.script.call_function(
+ function_declaration=f"""(arg) => {{
+ if(!isNaN(arg))
+ throw Error("Argument should be 'NaN', but was "+arg);
+ return arg;
+ }}""",
+ arguments=[nan_remote_value],
+ await_promise=False,
+ target=ContextTarget(top_context["context"]),
+ )
+
+ recursive_compare(nan_remote_value, result)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "argument, expected_type",
+ [
+ ({
+ "type": "array",
+ "value": [
+ {"type": "string", "value": "foobar"},
+ ],
+ },
+ "Array"
+ ),
+ ({"type": "date", "value": "2022-05-31T13:47:29.000Z"},
+ "Date"
+ ),
+ ({
+ "type": "map",
+ "value": [
+ ["foobar", {"type": "string", "value": "foobar"}],
+ ],
+ },
+ "Map"
+ ),
+ ({
+ "type": "object",
+ "value": [
+ ["foobar", {"type": "string", "value": "foobar"}],
+ ],
+ },
+ "Object"
+ ),
+ ({"type": "regexp", "value": {"pattern": "foo", "flags": "g"}},
+ "RegExp"
+ ),
+ ({
+ "type": "set",
+ "value": [
+ {"type": "string", "value": "foobar"},
+ ],
+ },
+ "Set"
+ )
+ ],
+)
+async def test_local_values(bidi_session, top_context, argument, expected_type):
+ result = await bidi_session.script.call_function(
+ function_declaration=f"""(arg) => {{
+ if(! (arg instanceof {expected_type}))
+ throw Error("Argument type should be {expected_type}, but was "+
+ Object.prototype.toString.call(arg));
+ return arg;
+ }}""",
+ arguments=[argument],
+ await_promise=False,
+ target=ContextTarget(top_context["context"]),
+ )
+
+ recursive_compare(argument, result)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "value_fn, function_declaration",
+ [
+ (
+ lambda value: value,
+ "function(arg) { return arg === window.SOME_OBJECT; }",
+ ),
+ (
+ lambda value: ({"type": "object", "value": [["nested", value]]}),
+ "function(arg) { return arg.nested === window.SOME_OBJECT; }",
+ ),
+ (
+ lambda value: ({"type": "array", "value": [value]}),
+ "function(arg) { return arg[0] === window.SOME_OBJECT; }",
+ ),
+ (
+ lambda value: ({"type": "map", "value": [["foobar", value]]}),
+ "function(arg) { return arg.get('foobar') === window.SOME_OBJECT; }",
+ ),
+ (
+ lambda value: ({"type": "set", "value": [value]}),
+ "function(arg) { return arg.has(window.SOME_OBJECT); }",
+ ),
+ ],
+)
+async def test_remote_value_deserialization(
+ bidi_session, top_context, call_function, evaluate, value_fn, function_declaration
+):
+ remote_value = await evaluate(
+ "window.SOME_OBJECT = {SOME_PROPERTY:'SOME_VALUE'}; window.SOME_OBJECT",
+ result_ownership="root",
+ )
+
+ # Check that a remote value can be successfully deserialized as an "argument"
+ # parameter and compared against the original object in the page.
+ result = await call_function(
+ function_declaration=function_declaration,
+ arguments=[value_fn(remote_value)],
+ )
+ assert result == {"type": "boolean", "value": True}
+
+ # Reload the page to cleanup the state
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=top_context["url"], wait="complete"
+ )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, function_declaration, expected",
+ [
+ (
+ "document.getElementsByTagName('span')",
+ "(collection) => collection.item(0)",
+ {
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 0,
+ "children": [],
+ "localName": "span",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1
+ }
+ }
+ ),
+ (
+ "document.querySelectorAll('span')",
+ "(nodeList) => nodeList.item(0)",
+ {
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 0,
+ "children": [],
+ "localName": "span",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1
+ }
+ }
+ ),
+ ], ids=[
+ "htmlcollection",
+ "nodelist"
+ ]
+)
+async def test_remote_value_dom_collection(
+ bidi_session,
+ inline,
+ top_context,
+ call_function,
+ expression,
+ function_declaration,
+ expected
+):
+ page_url = inline("""<p><span>""")
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=page_url, wait="complete"
+ )
+
+ remote_value = await bidi_session.script.evaluate(
+ expression=expression,
+ result_ownership="root",
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ # Check that a remote value can be successfully deserialized as an "argument"
+ # parameter and the first element be extracted.
+ result = await call_function(
+ function_declaration=function_declaration,
+ arguments=[remote_value],
+ )
+
+ assert result == expected
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/await_promise.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/await_promise.py
new file mode 100644
index 0000000000..f31d35cd80
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/await_promise.py
@@ -0,0 +1,48 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException
+
+from ... import any_int, any_string, recursive_compare
+from .. import any_stack_trace
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("await_promise", [True, False])
+async def test_await_promise_delayed(bidi_session, top_context, await_promise):
+ result = await bidi_session.script.call_function(
+ function_declaration="""
+ async function() {{
+ await new Promise(r => setTimeout(() => r(), 0));
+ return "SOME_DELAYED_RESULT";
+ }}
+ """,
+ await_promise=await_promise,
+ target=ContextTarget(top_context["context"]),
+ )
+
+ if await_promise:
+ assert result == {
+ "type": "string",
+ "value": "SOME_DELAYED_RESULT"}
+ else:
+ recursive_compare({
+ "type": "promise"},
+ result)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("await_promise", [True, False])
+async def test_await_promise_async_arrow(bidi_session, top_context, await_promise):
+ result = await bidi_session.script.call_function(
+ function_declaration="async ()=>{return 'SOME_VALUE'}",
+ await_promise=await_promise,
+ target=ContextTarget(top_context["context"]))
+
+ if await_promise:
+ assert result == {
+ "type": "string",
+ "value": "SOME_VALUE"}
+ else:
+ recursive_compare({
+ "type": "promise"},
+ result)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/exception_details.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/exception_details.py
new file mode 100644
index 0000000000..ef1118d0de
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/exception_details.py
@@ -0,0 +1,182 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException
+
+from ... import any_int, any_string, recursive_compare
+from .. import any_stack_trace
+
+
+@pytest.mark.asyncio
+async def test_invalid_function(bidi_session, top_context):
+ with pytest.raises(ScriptEvaluateResultException) as exception:
+ await bidi_session.script.call_function(
+ function_declaration="))) !!@@## some invalid JS script (((",
+ await_promise=False,
+ target=ContextTarget(top_context["context"]),
+ )
+ recursive_compare(
+ {
+ "realm": any_string,
+ "exceptionDetails": {
+ "columnNumber": any_int,
+ "exception": {"type": "error"},
+ "lineNumber": any_int,
+ "stackTrace": any_stack_trace,
+ "text": any_string,
+ },
+ },
+ exception.value.result,
+ )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("await_promise", [True, False])
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ ("undefined", {"type": "undefined"}),
+ ("null", {"type": "null"}),
+ ("'foobar'", {"type": "string", "value": "foobar"}),
+ ("'2'", {"type": "string", "value": "2"}),
+ ("Number.NaN", {"type": "number", "value": "NaN"}),
+ ("-0", {"type": "number", "value": "-0"}),
+ ("Infinity", {"type": "number", "value": "Infinity"}),
+ ("-Infinity", {"type": "number", "value": "-Infinity"}),
+ ("3", {"type": "number", "value": 3}),
+ ("1.4", {"type": "number", "value": 1.4}),
+ ("true", {"type": "boolean", "value": True}),
+ ("false", {"type": "boolean", "value": False}),
+ ("42n", {"type": "bigint", "value": "42"}),
+ ("(Symbol('foo'))", {"type": "symbol", },),
+ (
+ "[1, 'foo', true, new RegExp(/foo/g), [1]]",
+ {
+ "type": "array",
+ "value": [
+ {"type": "number", "value": 1},
+ {"type": "string", "value": "foo"},
+ {"type": "boolean", "value": True},
+ {
+ "type": "regexp",
+ "value": {
+ "pattern": "foo",
+ "flags": "g",
+ },
+ },
+ {"type": "array"},
+ ],
+ },
+ ),
+ (
+ "({'foo': {'bar': 'baz'}, 'qux': 'quux'})",
+ {
+ "type": "object",
+ "value": [
+ ["foo", {"type": "object"}],
+ ["qux", {"type": "string", "value": "quux"}],
+ ],
+ },
+ ),
+ ("(()=>{})", {"type": "function", },),
+ ("(function(){})", {"type": "function", },),
+ ("(async ()=>{})", {"type": "function", },),
+ ("(async function(){})", {"type": "function", },),
+ (
+ "new RegExp(/foo/g)",
+ {
+ "type": "regexp",
+ "value": {
+ "pattern": "foo",
+ "flags": "g",
+ },
+ },
+ ),
+ (
+ "new Date(1654004849000)",
+ {
+ "type": "date",
+ "value": "2022-05-31T13:47:29.000Z",
+ },
+ ),
+ (
+ "new Map([[1, 2], ['foo', 'bar'], [true, false], ['baz', [1]]])",
+ {
+ "type": "map",
+ "value": [
+ [
+ {"type": "number", "value": 1},
+ {"type": "number", "value": 2},
+ ],
+ ["foo", {"type": "string", "value": "bar"}],
+ [
+ {"type": "boolean", "value": True},
+ {"type": "boolean", "value": False},
+ ],
+ ["baz", {"type": "array"}],
+ ],
+ },
+ ),
+ (
+ "new Set([1, 'foo', true, [1], new Map([[1,2]])])",
+ {
+ "type": "set",
+ "value": [
+ {"type": "number", "value": 1},
+ {"type": "string", "value": "foo"},
+ {"type": "boolean", "value": True},
+ {"type": "array"},
+ {"type": "map"},
+ ],
+ },
+ ),
+ ("new WeakMap()", {"type": "weakmap", },),
+ ("new WeakSet()", {"type": "weakset", },),
+ ("new Error('SOME_ERROR_TEXT')", {"type": "error"},),
+ # TODO(sadym): add `iterator` test.
+ # TODO(sadym): add `generator` test.
+ # TODO(sadym): add `proxy` test.
+ ("Promise.resolve()", {"type": "promise", },),
+ ("new Int32Array()", {"type": "typedarray", },),
+ ("new ArrayBuffer()", {"type": "arraybuffer", },),
+ (
+ "document.createElement('div')",
+ {
+ "type": "node",
+ 'value': {
+ 'attributes': {},
+ 'childNodeCount': 0,
+ 'children': [],
+ 'localName': 'div',
+ 'namespaceURI': 'http://www.w3.org/1999/xhtml',
+ 'nodeType': 1,
+ }
+ },
+ ),
+ ("window", {"type": "window", },),
+ ],
+)
+@pytest.mark.asyncio
+async def test_exception_details(bidi_session, top_context, await_promise, expression, expected):
+ function_declaration = f"()=>{{ throw {expression} }}"
+ if await_promise:
+ function_declaration = "async" + function_declaration
+
+ with pytest.raises(ScriptEvaluateResultException) as exception:
+ await bidi_session.script.call_function(
+ function_declaration=function_declaration,
+ await_promise=await_promise,
+ target=ContextTarget(top_context["context"]),
+ )
+
+ recursive_compare(
+ {
+ "realm": any_string,
+ "exceptionDetails": {
+ "columnNumber": any_int,
+ "exception": expected,
+ "lineNumber": any_int,
+ "stackTrace": any_stack_trace,
+ "text": any_string,
+ },
+ },
+ exception.value.result,
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/function_declaration.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/function_declaration.py
new file mode 100644
index 0000000000..292e6da53b
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/function_declaration.py
@@ -0,0 +1,14 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+
+@pytest.mark.asyncio
+async def test_arrow_function(bidi_session, top_context):
+ result = await bidi_session.script.call_function(
+ function_declaration="()=>{return 1+2;}",
+ await_promise=False,
+ target=ContextTarget(top_context["context"]),
+ )
+
+ assert result == {"type": "number", "value": 3}
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/internal_id.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/internal_id.py
new file mode 100644
index 0000000000..562084203a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/internal_id.py
@@ -0,0 +1,67 @@
+import pytest
+
+from ... import recursive_compare, any_string
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "return_structure, result_type",
+ [
+ ("[data, data]", "array"),
+ ("new Map([['foo', data],['bar', data]])", "map"),
+ ("({ 'foo': data, 'bar': data })", "object"),
+ ],
+)
+@pytest.mark.parametrize(
+ "expression, type",
+ [
+ ("[1]", "array"),
+ ("new Map([[true, false]])", "map"),
+ ("new Set(['baz'])", "set"),
+ ("{ baz: 'qux' }", "object"),
+ ],
+)
+async def test_remote_values_with_internal_id(
+ call_function, return_structure, result_type, expression, type
+):
+ result = await call_function(
+ f"() => {{ const data = {expression}; return {return_structure}; }}"
+ )
+ result_value = result["value"]
+
+ assert len(result_value) == 2
+
+ if result_type == "array":
+ value = [
+ {"type": type, "internalId": any_string},
+ {"type": type, "internalId": any_string},
+ ]
+ internalId1 = result_value[0]["internalId"]
+ internalId2 = result_value[1]["internalId"]
+ else:
+ value = [
+ ["foo", {"type": type, "internalId": any_string}],
+ ["bar", {"type": type, "internalId": any_string}],
+ ]
+ internalId1 = result_value[0][1]["internalId"]
+ internalId2 = result_value[1][1]["internalId"]
+
+ # Make sure that the same duplicated objects have the same internal ids
+ assert internalId1 == internalId2
+
+ recursive_compare(value, result_value)
+
+
+@pytest.mark.asyncio
+async def test_different_remote_values_have_unique_internal_ids(call_function):
+ result = await call_function(
+ "() => { const obj1 = [1]; const obj2 = {'foo': 'bar'}; return [obj1, obj2, obj1, obj2]; }"
+ )
+
+ assert len(result["value"]) == 4
+
+ internalId1 = result["value"][0]["internalId"]
+ internalId2 = result["value"][1]["internalId"]
+
+ # Make sure that different duplicated objects have different internal ids
+ assert internalId1 != internalId2
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid.py
new file mode 100644
index 0000000000..cf5a07f41b
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid.py
@@ -0,0 +1,123 @@
+import pytest
+import webdriver.bidi.error as error
+
+from webdriver.bidi.modules.script import ContextTarget, RealmTarget
+
+pytestmark = pytest.mark.asyncio
+
+
@pytest.mark.parametrize("target", [None, False, "foo", 42, {}, []])
async def test_params_target_invalid_type(bidi_session, target):
    """A "target" that is not a mapping is rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            await_promise=False,
            target=target)
+
+
@pytest.mark.parametrize("context", [None, False, 42, {}, []])
async def test_params_context_invalid_type(bidi_session, context):
    """A non-string context id is rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            await_promise=False,
            target=ContextTarget(context))
+
+
@pytest.mark.parametrize("sandbox", [False, 42, {}, []])
async def test_params_sandbox_invalid_type(bidi_session, top_context, sandbox):
    """A non-string sandbox name is rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            await_promise=False,
            target=ContextTarget(top_context["context"],
                                 sandbox))
+
+
async def test_params_context_unknown(bidi_session):
    """An unknown context id is rejected with "no such frame"."""
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            await_promise=False,
            target=ContextTarget("_UNKNOWN_"))
+
+
@pytest.mark.parametrize("realm", [None, False, 42, {}, []])
async def test_params_realm_invalid_type(bidi_session, realm):
    """A non-string realm id is rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            await_promise=False,
            target=RealmTarget(realm))
+
+
async def test_params_realm_unknown(bidi_session):
    """An unknown realm id is rejected with "no such frame"."""
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            await_promise=False,
            target=RealmTarget("_UNKNOWN_"))
+
+
@pytest.mark.parametrize("function_declaration", [None, False, 42, {}, []])
async def test_params_function_declaration_invalid_type(bidi_session, top_context,
                                                        function_declaration):
    """A non-string "functionDeclaration" is rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration=function_declaration,
            await_promise=False,
            target=ContextTarget(top_context["context"]))
+
+
@pytest.mark.parametrize("this", [False, "SOME_STRING", 42, {}, []])
async def test_params_this_invalid_type(bidi_session, top_context,
                                        this):
    """A "this" value that is not a mapping is rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            this=this,
            await_promise=False,
            target=ContextTarget(top_context["context"]))
+
+
@pytest.mark.parametrize("arguments", [False, "SOME_STRING", 42, {}])
async def test_params_arguments_invalid_type(bidi_session, top_context,
                                             arguments):
    """A non-list "arguments" value is rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            arguments=arguments,
            await_promise=False,
            target=ContextTarget(top_context["context"]))
+
+
@pytest.mark.parametrize("argument", [False, "SOME_STRING", 42, {}, []])
async def test_params_single_argument_invalid_type(bidi_session, top_context,
                                                   argument):
    """Each "arguments" entry must be a mapping; anything else is "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            arguments=[argument],
            await_promise=False,
            target=ContextTarget(top_context["context"]))
+
+
@pytest.mark.parametrize("await_promise", [None, "False", 0, 42, {}, []])
async def test_params_await_promise_invalid_type(bidi_session, top_context,
                                                 await_promise):
    """A non-boolean "awaitPromise" is rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            await_promise=await_promise,
            target=ContextTarget(top_context["context"]))
+
+
@pytest.mark.parametrize("result_ownership", [False, "_UNKNOWN_", 42, {}, []])
async def test_params_result_ownership_invalid_value(bidi_session, top_context,
                                                     result_ownership):
    """A "resultOwnership" outside {"root", "none"} is rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="1 + 2",
            await_promise=False,
            target=ContextTarget(top_context["context"]),
            result_ownership=result_ownership)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid_tentative.py
new file mode 100644
index 0000000000..2726178e47
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/invalid_tentative.py
@@ -0,0 +1,38 @@
+import pytest
+import webdriver.bidi.error as error
+
+from webdriver.bidi.modules.script import ContextTarget
+
+pytestmark = pytest.mark.asyncio
+
+
+# The following tests are marked as tentative until
+# https://github.com/w3c/webdriver-bidi/issues/274 is resolved.
async def test_params_target_invalid_value(bidi_session, top_context):
    """Targets mixing "realm" with "context"/"sandbox", or "sandbox" alone,
    must be rejected with "invalid argument".
    """
    # Fetch a genuine realm id via the raw result of a known-valid call.
    result = await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => 1 + 2",
        target=ContextTarget(top_context["context"]),
        await_promise=True,
    )

    # "context" and "realm" are mutually exclusive.
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="() => 1 + 2",
            target={"context": top_context["context"], "realm": result["realm"]},
            await_promise=True,
        )

    # "sandbox" and "realm" are mutually exclusive.
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="() => 1 + 2",
            target={"sandbox": "foo", "realm": result["realm"]},
            await_promise=True,
        )

    # "sandbox" alone (without a context) is not a valid target.
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.call_function(
            function_declaration="() => 1 + 2",
            target={"sandbox": "bar"},
            await_promise=True,
        )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/realm.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/realm.py
new file mode 100644
index 0000000000..a8830230ee
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/realm.py
@@ -0,0 +1,71 @@
+import pytest
+
+from webdriver.bidi.modules.script import RealmTarget
+from ... import recursive_compare
+
+
@pytest.mark.asyncio
async def test_target_realm(bidi_session, default_realm):
    """A call addressed via RealmTarget runs in, and reports, that realm."""
    result = await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => { window.foo = 3; }",
        target=RealmTarget(default_realm),
        await_promise=True,
    )

    recursive_compare({"realm": default_realm, "result": {"type": "undefined"}}, result)

    # The state written above must be observable by a second call to the same realm.
    result = await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => window.foo",
        target=RealmTarget(default_realm),
        await_promise=True,
    )

    recursive_compare(
        {"realm": default_realm, "result": {"type": "number", "value": 3}}, result
    )
+
+
@pytest.mark.asyncio
async def test_different_target_realm(bidi_session):
    """Realms are isolated: writes in one tab's realm do not leak into another's."""
    await bidi_session.browsing_context.create(type_hint="tab")

    realms = await bidi_session.script.get_realms()
    # NOTE(review): assumes get_realms() lists the first tab's default realm
    # before the newly created tab's — confirm this ordering is guaranteed.
    first_tab_default_realm = realms[0]["realm"]
    second_tab_default_realm = realms[1]["realm"]

    assert first_tab_default_realm != second_tab_default_realm

    # Write a different value to `window.foo` in each realm.
    await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => { window.foo = 3; }",
        target=RealmTarget(first_tab_default_realm),
        await_promise=True,
    )
    await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => { window.foo = 5; }",
        target=RealmTarget(second_tab_default_realm),
        await_promise=True,
    )

    # Each realm must still observe only its own value.
    top_context_result = await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => window.foo",
        target=RealmTarget(first_tab_default_realm),
        await_promise=True,
    )
    recursive_compare(
        {"realm": first_tab_default_realm, "result": {"type": "number", "value": 3}}, top_context_result
    )

    new_context_result = await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => window.foo",
        target=RealmTarget(second_tab_default_realm),
        await_promise=True,
    )
    recursive_compare(
        {"realm": second_tab_default_realm, "result": {"type": "number", "value": 5}}, new_context_result
    )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result.py
new file mode 100644
index 0000000000..f5ce1b36f1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result.py
@@ -0,0 +1,160 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget
+
+pytestmark = pytest.mark.asyncio
+
+
@pytest.mark.parametrize("await_promise", [True, False])
@pytest.mark.parametrize(
    "expression, expected",
    [
        ("undefined", {"type": "undefined"}),
        ("null", {"type": "null"}),
        ("'foobar'", {"type": "string", "value": "foobar"}),
        ("'2'", {"type": "string", "value": "2"}),
        # Non-JSON-representable numbers are serialized as strings.
        ("Number.NaN", {"type": "number", "value": "NaN"}),
        ("-0", {"type": "number", "value": "-0"}),
        ("Infinity", {"type": "number", "value": "Infinity"}),
        ("-Infinity", {"type": "number", "value": "-Infinity"}),
        ("3", {"type": "number", "value": 3}),
        ("1.4", {"type": "number", "value": 1.4}),
        ("true", {"type": "boolean", "value": True}),
        ("false", {"type": "boolean", "value": False}),
        ("42n", {"type": "bigint", "value": "42"}),
    ],
)
async def test_primitive_values(bidi_session, top_context, await_promise, expression, expected):
    """Primitive return values serialize to the expected type/value pairs,
    for both plain and async (awaited) functions."""
    function_declaration = f"()=>{expression}"
    if await_promise:
        function_declaration = "async" + function_declaration

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        await_promise=await_promise,
        target=ContextTarget(top_context["context"]),
    )

    assert result == expected
+
+
@pytest.mark.parametrize("await_promise", [True, False])
@pytest.mark.parametrize(
    "expression, expected",
    [
        ("(Symbol('foo'))", {"type": "symbol"}),
        (
            "[1, 'foo', true, new RegExp(/foo/g), [1]]",
            {
                "type": "array",
                "value": [
                    {"type": "number", "value": 1},
                    {"type": "string", "value": "foo"},
                    {"type": "boolean", "value": True},
                    {
                        "type": "regexp",
                        "value": {
                            "pattern": "foo",
                            "flags": "g",
                        },
                    },
                    # Nested containers beyond the serialization depth have no "value".
                    {"type": "array"},
                ],
            },
        ),
        (
            "({'foo': {'bar': 'baz'}, 'qux': 'quux'})",
            {
                "type": "object",
                "value": [
                    ["foo", {"type": "object"}],
                    ["qux", {"type": "string", "value": "quux"}],
                ],
            },
        ),
        ("(()=>{})", {"type": "function"}),
        ("(function(){})", {"type": "function"}),
        ("(async ()=>{})", {"type": "function"}),
        ("(async function(){})", {"type": "function"}),
        (
            "new RegExp(/foo/g)",
            {"type": "regexp", "value": {"pattern": "foo", "flags": "g"}},
        ),
        (
            "new Date(1654004849000)",
            {"type": "date", "value": "2022-05-31T13:47:29.000Z"},
        ),
        (
            "new Map([[1, 2], ['foo', 'bar'], [true, false], ['baz', [1]]])",
            {
                "type": "map",
                "value": [
                    [
                        {"type": "number", "value": 1},
                        {"type": "number", "value": 2},
                    ],
                    ["foo", {"type": "string", "value": "bar"}],
                    [
                        {"type": "boolean", "value": True},
                        {"type": "boolean", "value": False},
                    ],
                    ["baz", {"type": "array"}],
                ],
            },
        ),
        (
            "new Set([1, 'foo', true, [1], new Map([[1,2]])])",
            {
                "type": "set",
                "value": [
                    {"type": "number", "value": 1},
                    {"type": "string", "value": "foo"},
                    {"type": "boolean", "value": True},
                    {"type": "array"},
                    {"type": "map"},
                ],
            },
        ),
        ("new WeakMap()", {"type": "weakmap"}),
        ("new WeakSet()", {"type": "weakset"}),
        ("new Error('SOME_ERROR_TEXT')", {"type": "error"}),
        # TODO(sadym): add `iterator` test.
        # TODO(sadym): add `generator` test.
        # TODO(sadym): add `proxy` test.
        ("new Int32Array()", {"type": "typedarray"}),
        ("new ArrayBuffer()", {"type": "arraybuffer"}),
        ("window", {"type": "window"}),
        ("new URL('https://example.com')", {"type": "object", },),
    ],
)
async def test_remote_values(bidi_session, top_context, await_promise, expression, expected):
    """Non-primitive return values serialize as remote values of the expected
    type, for both plain and async (awaited) functions."""
    function_declaration = f"()=>{expression}"
    if await_promise:
        function_declaration = "async" + function_declaration

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        await_promise=await_promise,
        target=ContextTarget(top_context["context"]),
    )

    assert result == expected
+
+
async def test_remote_value_promise_await(bidi_session, top_context):
    """With awaitPromise=true a resolved promise unwraps to its value."""
    unwrapped = await bidi_session.script.call_function(
        function_declaration="()=>Promise.resolve(42)",
        await_promise=True,
        target=ContextTarget(top_context["context"]),
    )
    assert unwrapped == {"type": "number", "value": 42}
+
+
async def test_remote_value_promise_no_await(bidi_session, top_context):
    """With awaitPromise=false the promise itself is serialized, not its value."""
    serialized = await bidi_session.script.call_function(
        function_declaration="()=>Promise.resolve(42)",
        await_promise=False,
        target=ContextTarget(top_context["context"]),
    )
    assert serialized == {"type": "promise"}
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_node.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_node.py
new file mode 100644
index 0000000000..e2f81a4f53
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_node.py
@@ -0,0 +1,596 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget
+
# Markup navigated to by the tests below: nested elements for depth checks,
# a text node, a comment node, and an SVG element that the inline script
# decorates with a namespaced attribute.
page_data = """
    <div id="deep"><p><span></span></p><br/></div>
    <div id="text-node"><p></p>Lorem</div>
    <br/>
    <svg id="foo"></svg>
    <div id="comment"><!-- Comment --></div>
    <script>
        var svg = document.querySelector("svg");
        svg.setAttributeNS("http://www.w3.org/2000/svg", "svg:foo", "bar");
    </script>
"""
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        ( # basic
            """
                () => document.querySelector("br")
            """,
            {
                "type": "node",
                "value": {
                    "attributes": {},
                    "childNodeCount": 0,
                    "children": [],
                    "localName": "br",
                    "namespaceURI": "http://www.w3.org/1999/xhtml",
                    "nodeType": 1,
                },
            },
        ),
        ( # attributes
            """
                () => document.querySelector("svg")
            """,
            {
                "type": "node",
                "value": {
                    "attributes": {
                        "id": "foo",
                        "svg:foo": "bar",
                    },
                    "childNodeCount": 0,
                    "children": [],
                    "localName": "svg",
                    "namespaceURI": "http://www.w3.org/2000/svg",
                    "nodeType": 1,
                },
            },
        ),
        ( # all children including non-element nodes
            """
                () => document.querySelector("div#text-node")
            """,
            {
                "type": "node",
                "value": {
                    "attributes": {"id": "text-node"},
                    "childNodeCount": 2,
                    "children": [{
                        "type": "node",
                        "value": {
                            "attributes": {},
                            "childNodeCount": 0,
                            "children": None,
                            "localName": "p",
                            "namespaceURI": "http://www.w3.org/1999/xhtml",
                            "nodeType": 1
                        }
                    }, {
                        "type": "node",
                        "value": {
                            "childNodeCount": 0,
                            "children": None,
                            "nodeType": 3,
                            "nodeValue": "Lorem",
                        }
                    }],
                    "localName": "div",
                    "namespaceURI": "http://www.w3.org/1999/xhtml",
                    "nodeType": 1,
                },
            },
        ),
        ( # children limited due to max depth
            """
                () => document.querySelector("div#deep")
            """,
            {
                "type": "node",
                "value": {
                    "attributes": {"id": "deep"},
                    "childNodeCount": 2,
                    "children": [{
                        "type": "node",
                        "value": {
                            "attributes": {},
                            "childNodeCount": 1,
                            "children": None,
                            "localName": "p",
                            "namespaceURI": "http://www.w3.org/1999/xhtml",
                            "nodeType": 1
                        }
                    }, {
                        "type": "node",
                        "value": {
                            "attributes": {},
                            "childNodeCount": 0,
                            "children": None,
                            "localName": "br",
                            "namespaceURI": "http://www.w3.org/1999/xhtml",
                            "nodeType": 1
                        }
                    }],
                    "localName": "div",
                    "namespaceURI": "http://www.w3.org/1999/xhtml",
                    "nodeType": 1,
                },
            },
        ),
        ( # not connected
            """
                () => document.createElement("div")
            """,
            {
                "type": "node",
                "value": {
                    "attributes": {},
                    "childNodeCount": 0,
                    "children": [],
                    "localName": "div",
                    "namespaceURI": "http://www.w3.org/1999/xhtml",
                    "nodeType": 1,
                },
            },
        ),
    ], ids=[
        "basic",
        "attributes",
        "all_children",
        "children_max_depth",
        "not_connected",
    ]
)
async def test_element_node(
    bidi_session, inline, top_context, function_declaration, expected
):
    """Element nodes (nodeType 1) serialize with attributes, localName,
    namespaceURI and children; grandchildren are truncated ("children": None)
    once the serialization depth is exceeded.
    """
    await bidi_session.browsing_context.navigate(
        context=top_context['context'], url=inline(page_data), wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    assert result == expected
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        (
            """
                () => document.querySelector("svg").attributes[0]
            """,
            {
                "type": "node",
                "value": {
                    "childNodeCount": 0,
                    "children": [],
                    "localName": "id",
                    "namespaceURI": None,
                    "nodeType": 2,
                    "nodeValue": "foo",
                },
            },
        ), (
            """
                () => document.querySelector("svg").attributes[1]
            """,
            {
                "type": "node",
                "value": {
                    "childNodeCount": 0,
                    "children": [],
                    "localName": "foo",
                    "namespaceURI": "http://www.w3.org/2000/svg",
                    "nodeType": 2,
                    "nodeValue": "bar",
                },
            },
        ),
    ], ids=[
        "basic",
        "namespace",
    ]
)
async def test_attribute_node(
    bidi_session, inline, top_context, function_declaration, expected
):
    """Attr nodes (nodeType 2) serialize with localName, namespaceURI and
    the attribute value as nodeValue."""
    await bidi_session.browsing_context.navigate(
        context=top_context['context'], url=inline(page_data), wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    assert result == expected
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        (
            """
                () => document.querySelector("div#text-node").childNodes[1]
            """,
            {
                "type": "node",
                "value": {
                    "childNodeCount": 0,
                    "children": [],
                    "nodeType": 3,
                    "nodeValue": "Lorem",
                }
            }
        ),
    ], ids=[
        "basic",
    ]
)
async def test_text_node(bidi_session, inline, top_context, function_declaration, expected):
    """Text nodes (nodeType 3) serialize with their text content as nodeValue."""
    await bidi_session.browsing_context.navigate(
        context=top_context['context'], url=inline(page_data), wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    assert result == expected
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        (
            """
                () => document.querySelector("foo").childNodes[1]
            """,
            {
                "type": "node",
                "value": {
                    "childNodeCount": 0,
                    "children": [],
                    "nodeType": 4,
                    "nodeValue": " < > & ",
                }
            }
        ),
    ], ids=[
        "basic",
    ]
)
async def test_cdata_node(bidi_session, inline, new_tab, function_declaration, expected):
    """CDATA sections (nodeType 4) serialize with their raw content as
    nodeValue; an XML document is required for CDATA to exist."""
    xml_page = inline("""<foo>CDATA section: <![CDATA[ < > & ]]>.</foo>""", doctype="xml")

    await bidi_session.browsing_context.navigate(
        context=new_tab['context'], url=xml_page, wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(new_tab["context"]),
        await_promise=False,
    )

    assert result == expected
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        (
            """
                () => document.createProcessingInstruction("xml-stylesheet", "href='foo.css'")
            """,
            {
                "type": "node",
                "value": {
                    "childNodeCount": 0,
                    "children": [],
                    "nodeType": 7,
                    "nodeValue": "href='foo.css'",
                }
            }
        ),
    ], ids=[
        "basic",
    ]
)
async def test_processing_instruction_node(
    bidi_session, inline, new_tab, function_declaration, expected
):
    """Processing instructions (nodeType 7) serialize with their data as nodeValue."""
    xml_page = inline("""<foo></foo>""", doctype="xml")

    await bidi_session.browsing_context.navigate(
        context=new_tab['context'], url=xml_page, wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(new_tab["context"]),
        await_promise=False,
    )

    assert result == expected
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        (
            """
                () => document.querySelector("div#comment").childNodes[0]
            """,
            {
                "type": "node",
                "value": {
                    "childNodeCount": 0,
                    "children": [],
                    "nodeType": 8,
                    "nodeValue": " Comment ",
                }
            }
        ),
    ], ids=[
        "basic",
    ]
)
async def test_comment_node(
    bidi_session, inline, top_context, function_declaration, expected
):
    """Comment nodes (nodeType 8) serialize with the comment text as nodeValue."""
    await bidi_session.browsing_context.navigate(
        context=top_context['context'], url=inline(page_data), wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    assert result == expected
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        (
            """
                () => document
            """,
            {
                "type": "node",
                "value": {
                    "childNodeCount": 2,
                    "children": [{
                        "type": "node",
                        "value": {
                            "childNodeCount": 0,
                            "children": None,
                            "nodeType": 10
                        }
                    }, {
                        "type": "node",
                        "value": {
                            "attributes": {},
                            "childNodeCount": 2,
                            "children": None,
                            "localName": "html",
                            "namespaceURI": "http://www.w3.org/1999/xhtml",
                            "nodeType": 1
                        }
                    }],
                    "nodeType": 9
                }
            }
        ),
    ], ids=[
        "basic",
    ]
)
async def test_document_node(
    bidi_session, inline, top_context, function_declaration, expected
):
    """Document nodes (nodeType 9) serialize with the doctype and the root
    element as their children."""
    await bidi_session.browsing_context.navigate(
        context=top_context['context'], url=inline(page_data), wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    assert result == expected
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        (
            """
                () => document.doctype
            """,
            {
                "type": "node",
                "value": {
                    "childNodeCount": 0,
                    "children": [],
                    "nodeType": 10,
                }
            }
        ),
    ], ids=[
        "basic",
    ]
)
async def test_doctype_node(
    bidi_session, inline, top_context, function_declaration, expected
):
    """DocumentType nodes (nodeType 10) serialize without attributes or children."""
    await bidi_session.browsing_context.navigate(
        context=top_context['context'], url=inline(page_data), wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    assert result == expected
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        (
            """
                () => new DocumentFragment()
            """,
            {
                "type": "node",
                "value": {
                    "childNodeCount": 0,
                    "children": [],
                    "nodeType": 11,
                }
            }
        ),
    ], ids=[
        "basic",
    ]
)
async def test_document_fragment_node(
    bidi_session, inline, top_context, function_declaration, expected
):
    """DocumentFragment nodes (nodeType 11) serialize with their child list."""
    await bidi_session.browsing_context.navigate(
        context=top_context['context'], url=inline(page_data), wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    assert result == expected
+
+
@pytest.mark.asyncio
async def test_node_within_object(bidi_session, inline, top_context):
    """A node nested in a returned object serializes as a node value whose own
    children are truncated ("children": None)."""
    await bidi_session.browsing_context.navigate(
        context=top_context['context'], url=inline(page_data), wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration="""() => ({"elem": document.querySelector("span")})""",
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    assert result == {
        "type": "object",
        "value": [
            ["elem", {
                "type": "node",
                "value": {
                    "attributes": {},
                    "childNodeCount": 0,
                    "children": None,
                    "localName": "span",
                    "namespaceURI": "http://www.w3.org/1999/xhtml",
                    "nodeType": 1
                }
            }]
        ]
    }
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "function_declaration, expected",
    [
        (
            "() => document.getElementsByTagName('span')",
            {
                "type": "htmlcollection",
                "value": [
                    {
                        "type": "node",
                        "value": {
                            "attributes": {},
                            "childNodeCount": 0,
                            "children": None,
                            "localName": "span",
                            "namespaceURI": "http://www.w3.org/1999/xhtml",
                            "nodeType": 1
                        }
                    },
                ]
            }
        ),
        (
            "() => document.querySelectorAll('span')",
            {
                "type": "nodelist",
                "value": [
                    {
                        "type": "node",
                        "value": {
                            "attributes": {},
                            "childNodeCount": 0,
                            "children": None,
                            "localName": "span",
                            "namespaceURI": "http://www.w3.org/1999/xhtml",
                            "nodeType": 1
                        }
                    },
                ]
            }
        ),
    ], ids=[
        "htmlcollection",
        "nodelist"
    ]
)
async def test_node_within_dom_collection(
    bidi_session,
    inline,
    top_context,
    function_declaration,
    expected
):
    """Nodes inside HTMLCollection and NodeList results serialize as node
    values with truncated children."""
    await bidi_session.browsing_context.navigate(
        context=top_context['context'], url=inline(page_data), wait="complete"
    )

    result = await bidi_session.script.call_function(
        function_declaration=function_declaration,
        target=ContextTarget(top_context["context"]),
        await_promise=False,
    )

    assert result == expected
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_ownership.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_ownership.py
new file mode 100644
index 0000000000..01c5a28b62
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/result_ownership.py
@@ -0,0 +1,60 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException
+from .. import assert_handle
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize("result_ownership, should_contain_handle",
                         [("root", True), ("none", False), (None, False)])
async def test_throw_exception(bidi_session, top_context, result_ownership, should_contain_handle):
    """A thrown exception object carries a handle only with "root" ownership."""
    with pytest.raises(ScriptEvaluateResultException) as exception:
        await bidi_session.script.call_function(
            function_declaration='()=>{throw {a:1}}',
            await_promise=False,
            result_ownership=result_ownership,
            target=ContextTarget(top_context["context"]))

    assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle)
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize("result_ownership, should_contain_handle",
                         [("root", True), ("none", False), (None, False)])
async def test_invalid_script(bidi_session, top_context, result_ownership, should_contain_handle):
    """A syntactically invalid script's exception honors result ownership too."""
    with pytest.raises(ScriptEvaluateResultException) as exception:
        await bidi_session.script.call_function(
            function_declaration="))) !!@@## some invalid JS script (((",
            await_promise=False,
            result_ownership=result_ownership,
            target=ContextTarget(top_context["context"]))

    assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle)
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize("result_ownership, should_contain_handle",
                         [("root", True), ("none", False), (None, False)])
async def test_rejected_promise(bidi_session, top_context, result_ownership, should_contain_handle):
    """An awaited rejection's reason carries a handle only with "root" ownership."""
    with pytest.raises(ScriptEvaluateResultException) as exception:
        await bidi_session.script.call_function(
            function_declaration="()=>{return Promise.reject({a:1})}",
            await_promise=True,
            result_ownership=result_ownership,
            target=ContextTarget(top_context["context"]))

    assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle)
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize("await_promise", [True, False])
@pytest.mark.parametrize("result_ownership, should_contain_handle",
                         [("root", True), ("none", False), (None, False)])
async def test_return_value(bidi_session, top_context, await_promise, result_ownership, should_contain_handle):
    """A returned object carries a handle only with "root" ownership,
    with and without awaiting the async function's promise."""
    result = await bidi_session.script.call_function(
        function_declaration="async function(){return {a:1}}",
        await_promise=await_promise,
        result_ownership=result_ownership,
        target=ContextTarget(top_context["context"]))

    assert_handle(result, should_contain_handle)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/sandbox.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/sandbox.py
new file mode 100644
index 0000000000..e0a4914d8f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/sandbox.py
@@ -0,0 +1,204 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget, RealmTarget, ScriptEvaluateResultException
+
+from ... import any_int, any_string, recursive_compare
+from .. import any_stack_trace
+
+
@pytest.mark.asyncio
async def test_sandbox(bidi_session, new_tab):
    """Window state is isolated between the default realm and a sandbox, both ways."""
    # Make changes in window
    await bidi_session.script.call_function(
        function_declaration="() => { window.foo = 1; }",
        target=ContextTarget(new_tab["context"]),
        await_promise=True,
    )

    # Check that changes are not present in sandbox
    result_in_sandbox = await bidi_session.script.call_function(
        function_declaration="() => window.foo",
        target=ContextTarget(new_tab["context"], "sandbox"),
        await_promise=True,
    )
    assert result_in_sandbox == {"type": "undefined"}

    # Make changes in sandbox
    await bidi_session.script.call_function(
        function_declaration="() => { window.bar = 2; }",
        target=ContextTarget(new_tab["context"], "sandbox"),
        await_promise=True,
    )

    # Make sure that changes are present in sandbox
    result_in_sandbox = await bidi_session.script.call_function(
        function_declaration="() => window.bar",
        target=ContextTarget(new_tab["context"], "sandbox"),
        await_promise=True,
    )
    assert result_in_sandbox == {"type": "number", "value": 2}

    # Make sure that changes didn't leak from sandbox
    result_in_window = await bidi_session.script.call_function(
        function_declaration="() => window.bar",
        target=ContextTarget(new_tab["context"]),
        await_promise=True,
    )
    assert result_in_window == {"type": "undefined"}
+
+
@pytest.mark.asyncio
async def test_sandbox_with_empty_name(bidi_session, new_tab):
    """An empty sandbox name ("") addresses the default realm, not a sandbox."""
    # An empty string as a `sandbox` means the default realm should be used.
    await bidi_session.script.call_function(
        function_declaration="() => window.foo = 'bar'",
        target=ContextTarget(new_tab["context"], ""),
        await_promise=True,
    )

    # Make sure that we can find the sandbox with the empty name.
    result = await bidi_session.script.call_function(
        function_declaration="() => window.foo",
        target=ContextTarget(new_tab["context"], ""),
        await_promise=True,
    )
    assert result == {"type": "string", "value": "bar"}

    # Make sure that we can find the value in the default realm.
    result = await bidi_session.script.call_function(
        function_declaration="() => window.foo",
        target=ContextTarget(new_tab["context"]),
        await_promise=True,
    )
    assert result == {"type": "string", "value": "bar"}
+
+
@pytest.mark.asyncio
async def test_switch_sandboxes(bidi_session, new_tab):
    """Named sandboxes keep their own state while switching between them."""
    # Test that sandboxes are retained when switching between them
    await bidi_session.script.call_function(
        function_declaration="() => { window.foo = 1; }",
        target=ContextTarget(new_tab["context"], "sandbox_1"),
        await_promise=True,
    )
    await bidi_session.script.call_function(
        function_declaration="() => { window.foo = 2; }",
        target=ContextTarget(new_tab["context"], "sandbox_2"),
        await_promise=True,
    )

    result_in_sandbox_1 = await bidi_session.script.call_function(
        function_declaration="() => window.foo",
        target=ContextTarget(new_tab["context"], "sandbox_1"),
        await_promise=True,
    )
    assert result_in_sandbox_1 == {"type": "number", "value": 1}

    result_in_sandbox_2 = await bidi_session.script.call_function(
        function_declaration="() => window.foo",
        target=ContextTarget(new_tab["context"], "sandbox_2"),
        await_promise=True,
    )
    assert result_in_sandbox_2 == {"type": "number", "value": 2}
+
+
@pytest.mark.asyncio
async def test_sandbox_with_side_effects(bidi_session, new_tab):
    """Sandboxes share the underlying document: DOM mutations cross sandboxes."""
    # Make sure changing the node in sandbox will affect the other sandbox as well
    await bidi_session.script.call_function(
        function_declaration="() => document.querySelector('body').textContent = 'foo'",
        target=ContextTarget(new_tab["context"], "sandbox_1"),
        await_promise=True,
    )
    expected_value = {"type": "string", "value": "foo"}

    result_in_sandbox_1 = await bidi_session.script.call_function(
        function_declaration="() => document.querySelector('body').textContent",
        target=ContextTarget(new_tab["context"], "sandbox_1"),
        await_promise=True,
    )
    assert result_in_sandbox_1 == expected_value

    result_in_sandbox_2 = await bidi_session.script.call_function(
        function_declaration="() => document.querySelector('body').textContent",
        target=ContextTarget(new_tab["context"], "sandbox_2"),
        await_promise=True,
    )
    assert result_in_sandbox_2 == expected_value
+
+
@pytest.mark.asyncio
async def test_arguments(bidi_session, new_tab):
    """Deserialized arguments (here a Set) are usable inside a sandbox call."""
    # Plain string: the original f-string had no placeholders, only escaped
    # braces, so the text handed to the browser is unchanged.
    check_is_set = """(arg) => {
            if(! (arg instanceof Set))
                throw Error("Argument type should be Set, but was "+
                    Object.prototype.toString.call(arg));
            return arg;
        }"""
    serialized_set = {
        "type": "set",
        "value": [
            {"type": "string", "value": "foobar"},
        ],
    }

    result = await bidi_session.script.call_function(
        function_declaration=check_is_set,
        arguments=[serialized_set],
        await_promise=False,
        target=ContextTarget(new_tab["context"], "sandbox"),
    )

    recursive_compare(serialized_set, result)
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize("await_promise", [True, False])
async def test_exception_details(bidi_session, new_tab, await_promise):
    """Exceptions thrown in a sandbox report full exception details.

    The thrown value, line/column numbers and a stack trace must all be
    present in "exceptionDetails" of the error response, for both sync and
    async (awaited) functions.
    """
    # Fix: the original used "()=>{{ throw 1 }}" — doubled braces left over
    # from an f-string template in a plain string, which sent a nested block
    # statement to the browser and only worked by accident.
    function_declaration = "()=>{ throw 1 }"
    if await_promise:
        function_declaration = "async" + function_declaration

    with pytest.raises(ScriptEvaluateResultException) as exception:
        await bidi_session.script.call_function(
            function_declaration=function_declaration,
            await_promise=await_promise,
            target=ContextTarget(new_tab["context"], "sandbox"),
        )

    recursive_compare(
        {
            "realm": any_string,
            "exceptionDetails": {
                "columnNumber": any_int,
                "exception": {"type": "number", "value": 1},
                "lineNumber": any_int,
                "stackTrace": any_stack_trace,
                "text": any_string,
            },
        },
        exception.value.result,
    )
+
+
@pytest.mark.asyncio
async def test_target_realm(bidi_session, top_context, default_realm):
    """A sandbox realm id differs from the default realm's and can be
    addressed directly via RealmTarget."""
    result = await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => { window.foo = 3; }",
        target=ContextTarget(top_context["context"], "sandbox"),
        await_promise=True,
    )
    realm = result["realm"]

    # Make sure that sandbox realm id is different from default
    assert realm != default_realm

    result = await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => window.foo",
        target=RealmTarget(realm),
        await_promise=True,
    )

    recursive_compare(
        {"realm": realm, "result": {"type": "number", "value": 3}}, result
    )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/strict_mode.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/strict_mode.py
new file mode 100644
index 0000000000..1a9fd4f108
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/strict_mode.py
@@ -0,0 +1,38 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException
+from ... import any_int, any_string, recursive_compare
+from .. import any_stack_trace, specific_error_response
+
+
@pytest.mark.asyncio
async def test_strict_mode(bidi_session, top_context):
    """Undeclared assignment fails in strict mode but succeeds in sloppy mode."""
    strict_fn = "()=>{'use strict';return SOME_VARIABLE=1}"
    sloppy_fn = "()=>{return SOME_VARIABLE=1}"
    target = ContextTarget(top_context["context"])
    expected_number = {"type": "number", "value": 1}

    # While no global `SOME_VARIABLE` exists, the strict-mode call must throw.
    with pytest.raises(ScriptEvaluateResultException) as excinfo:
        await bidi_session.script.call_function(
            function_declaration=strict_fn,
            await_promise=False,
            target=target,
        )
    recursive_compare(specific_error_response({"type": "error"}), excinfo.value.result)

    # Sloppy mode succeeds and implicitly creates the global `SOME_VARIABLE`.
    result = await bidi_session.script.call_function(
        function_declaration=sloppy_fn,
        await_promise=False,
        target=target,
    )
    assert result == expected_number

    # Now that the global exists, the strict-mode call succeeds as well.
    result = await bidi_session.script.call_function(
        function_declaration=strict_fn,
        await_promise=False,
        target=target,
    )
    assert result == expected_number
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/this.py b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/this.py
new file mode 100644
index 0000000000..17501b4ea5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/call_function/this.py
@@ -0,0 +1,85 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+from ... import recursive_compare
+
+
@pytest.mark.asyncio
async def test_this(bidi_session, top_context):
    """An explicit `this` argument is deserialized and visible in the function."""
    this_arg = {
        "type": "object",
        "value": [["some_property", {"type": "number", "value": 42}]],
    }

    result = await bidi_session.script.call_function(
        function_declaration="function(){return this.some_property}",
        this=this_arg,
        await_promise=False,
        target=ContextTarget(top_context["context"]),
    )

    assert result == {"type": "number", "value": 42}
+
+
@pytest.mark.asyncio
async def test_default_this(bidi_session, top_context):
    """Without an explicit `this`, the function is invoked on the window."""
    result = await bidi_session.script.call_function(
        target=ContextTarget(top_context["context"]),
        function_declaration="function(){return this}",
        await_promise=False,
    )

    recursive_compare({"type": "window"}, result)
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "value_fn, function_declaration",
    [
        (
            lambda value: value,
            "function() { return this === window.SOME_OBJECT; }",
        ),
        (
            lambda value: ({"type": "object", "value": [["nested", value]]}),
            "function() { return this.nested === window.SOME_OBJECT; }",
        ),
        (
            lambda value: ({"type": "array", "value": [value]}),
            "function() { return this[0] === window.SOME_OBJECT; }",
        ),
        (
            lambda value: ({"type": "map", "value": [["foobar", value]]}),
            "function() { return this.get('foobar') === window.SOME_OBJECT; }",
        ),
        (
            lambda value: ({"type": "set", "value": [value]}),
            "function() { return this.has(window.SOME_OBJECT); }",
        ),
    ],
)
async def test_remote_value_deserialization(
    bidi_session, top_context, call_function, evaluate, value_fn, function_declaration
):
    """A remote-value handle deserializes correctly when nested inside `this`."""
    remote_value = await evaluate(
        "window.SOME_OBJECT = {SOME_PROPERTY:'SOME_VALUE'}; window.SOME_OBJECT",
        result_ownership="root",
    )

    # The remote value, wrapped per the parametrized shape, must compare
    # identical to the original object living in the page.
    outcome = await call_function(
        this=value_fn(remote_value),
        function_declaration=function_declaration,
    )
    assert outcome == {"type": "boolean", "value": True}

    # Navigate again so window.SOME_OBJECT does not leak into other tests.
    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=top_context["url"], wait="complete"
    )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/conftest.py b/testing/web-platform/tests/webdriver/tests/bidi/script/conftest.py
new file mode 100644
index 0000000000..09eea7b7d5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/conftest.py
@@ -0,0 +1,62 @@
+import pytest
+from typing import Any, List, Mapping
+
+from webdriver.bidi.modules.script import ContextTarget, OwnershipModel
+
+
@pytest.fixture
def call_function(bidi_session, top_context):
    """Return a helper coroutine wrapping ``script.call_function``.

    Defaults: the top-level browsing context, no sandbox,
    ``OwnershipModel.NONE`` result ownership, and promises are not awaited.
    """
    async def call_function(
        function_declaration: str,
        arguments: List[Mapping[str, Any]] = None,
        this: Any = None,
        context: str = top_context["context"],
        sandbox: str = None,
        result_ownership: OwnershipModel = OwnershipModel.NONE.value,
    ) -> Mapping[str, Any]:
        # Avoid a shared mutable default list for `arguments`.
        if arguments is None:
            arguments = []

        # Fix: honor the `context` argument instead of always targeting the
        # top-level context (the parameter was previously ignored).
        if sandbox is None:
            target = ContextTarget(context)
        else:
            target = ContextTarget(context, sandbox)

        result = await bidi_session.script.call_function(
            function_declaration=function_declaration,
            arguments=arguments,
            this=this,
            await_promise=False,
            result_ownership=result_ownership,
            target=target,
        )
        return result

    return call_function
+
+
@pytest.fixture
async def default_realm(bidi_session, top_context):
    """Realm id of the top-level context's default (non-sandbox) realm."""
    all_realms = await bidi_session.script.get_realms(
        context=top_context["context"]
    )
    first = all_realms[0]
    return first["realm"]
+
+
@pytest.fixture
def evaluate(bidi_session, top_context):
    """Return a helper coroutine wrapping ``script.evaluate``.

    Defaults: the top-level browsing context, no sandbox,
    ``OwnershipModel.NONE`` result ownership, and promises are not awaited.
    """
    async def evaluate(
        expression: str,
        context: str = top_context["context"],
        sandbox: str = None,
        result_ownership: OwnershipModel = OwnershipModel.NONE.value,
    ) -> Mapping[str, Any]:
        # Fix: honor the `context` argument instead of always targeting the
        # top-level context (the parameter was previously ignored).
        if sandbox is None:
            target = ContextTarget(context)
        else:
            target = ContextTarget(context, sandbox)

        result = await bidi_session.script.evaluate(
            expression=expression,
            await_promise=False,
            result_ownership=result_ownership,
            target=target,
        )
        return result

    return evaluate
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/handles.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/handles.py
new file mode 100644
index 0000000000..d4a5145766
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/handles.py
@@ -0,0 +1,173 @@
+import pytest
+
+import webdriver.bidi.error as error
+
+from webdriver.bidi.modules.script import ContextTarget
+
+from .. import assert_handle
+
# Run every coroutine test in this module through pytest-asyncio.
pytestmark = pytest.mark.asyncio
+
+
async def test_basic_handle(bidi_session, top_context, call_function):
    """A handle stays usable until it is explicitly disowned."""
    context_target = ContextTarget(top_context["context"])

    remote_value = await bidi_session.script.evaluate(
        expression="({a:1})",
        result_ownership="root",
        await_promise=False,
        target=context_target,
    )
    assert_handle(remote_value, True)

    assert await call_function("arg => arg.a", [remote_value]) == {
        "type": "number",
        "value": 1,
    }

    await bidi_session.script.disown(
        handles=[remote_value["handle"]], target=context_target
    )

    # After disown, the handle must no longer resolve.
    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.a", [remote_value])
+
+
async def test_multiple_handles_for_different_objects(
    bidi_session, top_context, call_function
):
    """Disowning some handles leaves unrelated handles intact."""
    context_target = ContextTarget(top_context["context"])

    # Create three independent handles in order.
    values = {}
    for expression in ("({a:1})", "({b:2})", "({c:3})"):
        value = await bidi_session.script.evaluate(
            expression=expression,
            await_promise=False,
            result_ownership="root",
            target=context_target,
        )
        assert_handle(value, True)
        values[expression] = value

    value_a = values["({a:1})"]
    value_b = values["({b:2})"]
    value_c = values["({c:3})"]

    # Disown a and b in a single call.
    await bidi_session.script.disown(
        handles=[value_a["handle"], value_b["handle"]],
        target=context_target,
    )

    # a and b are gone ...
    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.a", [value_a])

    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.b", [value_b])

    # ... but c still resolves.
    assert await call_function("arg => arg.c", [value_c]) == {
        "type": "number",
        "value": 3,
    }

    # Finally disown c as well.
    await bidi_session.script.disown(
        handles=[value_c["handle"]], target=context_target
    )

    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.c", [value_c])
+
+
async def test_multiple_handles_for_same_object(
    bidi_session, top_context, call_function
):
    """Two handles to one object are independent; disowning one keeps the other."""
    context_target = ContextTarget(top_context["context"])

    handle_one = await bidi_session.script.evaluate(
        expression="window.test = { a: 1 }; window.test",
        await_promise=False,
        result_ownership="root",
        target=context_target,
    )
    assert_handle(handle_one, True)

    handle_two = await bidi_session.script.evaluate(
        expression="window.test",
        await_promise=False,
        result_ownership="root",
        target=context_target,
    )
    assert_handle(handle_two, True)

    expected_number = {"type": "number", "value": 1}

    # Both handles resolve ...
    assert await call_function("arg => arg.a", [handle_one]) == expected_number
    assert await call_function("arg => arg.a", [handle_two]) == expected_number

    # ... to the very same object.
    same = await call_function(
        "(arg1, arg2) => arg1 === arg2", [handle_one, handle_two]
    )
    assert same == {"type": "boolean", "value": True}

    # Disown the first handle only.
    await bidi_session.script.disown(
        handles=[handle_one["handle"]], target=context_target
    )

    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.a", [handle_one])

    # The second handle is unaffected.
    assert await call_function("arg => arg.a", [handle_two]) == expected_number

    # Disown the second handle too.
    await bidi_session.script.disown(
        handles=[handle_two["handle"]], target=context_target
    )

    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.a", [handle_two])
+
+
async def test_unknown_handle(bidi_session, top_context, call_function):
    """Unknown handles are ignored; known ones in the same call are disowned."""
    context_target = ContextTarget(top_context["context"])

    remote_value = await bidi_session.script.evaluate(
        expression="({a:1})",
        await_promise=False,
        result_ownership="root",
        target=context_target,
    )
    assert_handle(remote_value, True)

    # Disowning only an unknown handle neither fails nor touches other handles.
    await bidi_session.script.disown(
        handles=["unknown_handle"], target=context_target
    )

    assert await call_function("arg => arg.a", [remote_value]) == {
        "type": "number",
        "value": 1,
    }

    # Mixing an unknown handle with a known one still disowns the known one.
    await bidi_session.script.disown(
        handles=["unknown_handle", remote_value["handle"]],
        target=context_target,
    )

    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.a", [remote_value])
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid.py
new file mode 100644
index 0000000000..f9849f3e39
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid.py
@@ -0,0 +1,68 @@
+import pytest
+import webdriver.bidi.error as error
+
+from webdriver.bidi.modules.script import ContextTarget, RealmTarget
+
# Run every coroutine test in this module through pytest-asyncio.
pytestmark = pytest.mark.asyncio
+
+
@pytest.mark.parametrize("target", [None, False, "foo", 42, {}, []])
async def test_params_target_invalid_type(bidi_session, target):
    """Non-object `target` values are rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.disown(target=target, handles=[])
+
+
@pytest.mark.parametrize("context", [None, False, 42, {}, []])
async def test_params_context_invalid_type(bidi_session, context):
    """Non-string `context` values are rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.disown(target=ContextTarget(context), handles=[])
+
+
@pytest.mark.parametrize("sandbox", [False, 42, {}, []])
async def test_params_sandbox_invalid_type(bidi_session, top_context, sandbox):
    """Non-string `sandbox` values are rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.disown(
            target=ContextTarget(top_context["context"], sandbox), handles=[]
        )
+
+
async def test_params_context_unknown(bidi_session):
    """An unknown context id raises "no such frame"."""
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.script.disown(
            target=ContextTarget("_UNKNOWN_"), handles=[]
        )
+
+
@pytest.mark.parametrize("realm", [None, False, 42, {}, []])
async def test_params_realm_invalid_type(bidi_session, realm):
    """Non-string `realm` values are rejected with "invalid argument"."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.disown(target=RealmTarget(realm), handles=[])
+
+
async def test_params_realm_unknown(bidi_session):
    """An unknown realm id raises "no such frame"."""
    with pytest.raises(error.NoSuchFrameException):
        await bidi_session.script.disown(
            target=RealmTarget("_UNKNOWN_"), handles=[]
        )
+
+
@pytest.mark.parametrize("handles", [None, False, "foo", 42, {}])
async def test_params_handles_invalid_type(bidi_session, top_context, handles):
    """`handles` must be a list; any other type is rejected."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.disown(
            target=ContextTarget(top_context["context"]), handles=handles
        )
+
+
@pytest.mark.parametrize("handle", [None, False, 42, {}, []])
async def test_params_handles_invalid_handle_type(bidi_session, top_context, handle):
    """Each entry of `handles` must be a string; other types are rejected."""
    with pytest.raises(error.InvalidArgumentException):
        await bidi_session.script.disown(
            target=ContextTarget(top_context["context"]), handles=[handle]
        )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid_tentative.py
new file mode 100644
index 0000000000..730e2f575f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/invalid_tentative.py
@@ -0,0 +1,35 @@
+import pytest
+import webdriver.bidi.error as error
+
+from webdriver.bidi.modules.script import ContextTarget
+
# Run every coroutine test in this module through pytest-asyncio.
pytestmark = pytest.mark.asyncio
+
+
# The following test is marked as tentative until
# https://github.com/w3c/webdriver-bidi/issues/274 is resolved.
async def test_params_target_invalid_value(bidi_session, top_context):
    """Targets mixing context/sandbox with realm (or sandbox alone) are invalid."""
    call_result = await bidi_session.script.call_function(
        raw_result=True,
        function_declaration="() => 1 + 2",
        target=ContextTarget(top_context["context"]),
        await_promise=True,
    )
    realm = call_result["realm"]

    invalid_targets = [
        {"context": top_context["context"], "realm": realm},
        {"sandbox": "foo", "realm": realm},
        {"sandbox": "bar"},
    ]
    for target in invalid_targets:
        with pytest.raises(error.InvalidArgumentException):
            await bidi_session.script.disown(handles=[], target=target)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/disown/target.py b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/target.py
new file mode 100644
index 0000000000..d8439c2be3
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/disown/target.py
@@ -0,0 +1,95 @@
+import pytest
+
+import webdriver.bidi.error as error
+
+from webdriver.bidi.modules.script import ContextTarget, RealmTarget
+
+from .. import assert_handle
+
# Run every coroutine test in this module through pytest-asyncio.
pytestmark = pytest.mark.asyncio
+
+
async def test_realm(bidi_session, top_context, call_function):
    """A handle can be disowned through a RealmTarget."""
    raw = await bidi_session.script.evaluate(
        raw_result=True,
        expression="({a:1})",
        await_promise=False,
        result_ownership="root",
        target=ContextTarget(top_context["context"]),
    )
    remote_value = raw["result"]
    assert_handle(remote_value, True)

    assert await call_function("arg => arg.a", [remote_value]) == {
        "type": "number",
        "value": 1,
    }

    # Disown via the realm rather than the browsing context.
    await bidi_session.script.disown(
        handles=[remote_value["handle"]], target=RealmTarget(raw["realm"])
    )

    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.a", [remote_value])
+
+
async def test_sandbox(bidi_session, top_context, call_function):
    """Disown only releases handles owned by the targeted sandbox (or page)."""
    context = top_context["context"]

    # One remote value outside of any sandbox ...
    page_value = await bidi_session.script.evaluate(
        expression="({a:'without sandbox'})",
        await_promise=False,
        result_ownership="root",
        target=ContextTarget(context),
    )

    # ... and one created inside "basic_sandbox".
    sandbox_value = await bidi_session.script.evaluate(
        expression="({a:'with sandbox'})",
        await_promise=False,
        result_ownership="root",
        target=ContextTarget(context, "basic_sandbox"),
    )

    # Disowning the page value from the sandbox is a no-op.
    await bidi_session.script.disown(
        handles=[page_value["handle"]],
        target=ContextTarget(context, "basic_sandbox"),
    )
    result = await call_function("arg => arg.a", [page_value])
    assert result == {"type": "string", "value": "without sandbox"}

    # Disowning the sandbox value from the page or another sandbox is a no-op.
    await bidi_session.script.disown(
        handles=[sandbox_value["handle"]], target=ContextTarget(context)
    )
    await bidi_session.script.disown(
        handles=[sandbox_value["handle"]],
        target=ContextTarget(context, "another_sandbox"),
    )
    result = await call_function(
        "arg => arg.a", [sandbox_value], sandbox="basic_sandbox"
    )
    assert result == {"type": "string", "value": "with sandbox"}

    # Disowning from the owning sandbox actually releases the handle.
    await bidi_session.script.disown(
        handles=[sandbox_value["handle"]],
        target=ContextTarget(context, "basic_sandbox"),
    )
    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.a", [sandbox_value], sandbox="basic_sandbox")

    # Likewise, the page value is released when disowned from the page itself.
    await bidi_session.script.disown(
        handles=[page_value["handle"]], target=ContextTarget(context)
    )
    with pytest.raises(error.InvalidArgumentException):
        await call_function("arg => arg.a", [page_value], sandbox="basic_sandbox")
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/await_promise.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/await_promise.py
new file mode 100644
index 0000000000..04530984b2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/await_promise.py
@@ -0,0 +1,217 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException
+
+from ... import any_int, any_string, recursive_compare
+from .. import any_stack_trace
+
+
@pytest.mark.asyncio
async def test_await_promise_delayed(bidi_session, top_context):
    """A promise that settles asynchronously is awaited before returning."""
    value = await bidi_session.script.evaluate(
        expression="""
        new Promise(r => {{
            setTimeout(() => r("SOME_DELAYED_RESULT"), 0);
        }})
    """,
        await_promise=True,
        target=ContextTarget(top_context["context"]),
    )

    assert value == {"type": "string", "value": "SOME_DELAYED_RESULT"}
+
+
@pytest.mark.asyncio
async def test_await_promise_rejected(bidi_session, top_context):
    """Awaiting a rejected promise reports full exception details."""
    with pytest.raises(ScriptEvaluateResultException) as excinfo:
        await bidi_session.script.evaluate(
            expression="Promise.reject('SOME_REJECTED_RESULT')",
            await_promise=True,
            target=ContextTarget(top_context["context"]),
        )

    expected = {
        "realm": any_string,
        "exceptionDetails": {
            "columnNumber": any_int,
            "exception": {"type": "string", "value": "SOME_REJECTED_RESULT"},
            "lineNumber": any_int,
            "stackTrace": any_stack_trace,
            "text": any_string,
        },
    }
    recursive_compare(expected, excinfo.value.result)
+
+
@pytest.mark.asyncio
async def test_await_promise_resolved(bidi_session, top_context):
    """Awaiting a resolved promise yields its settlement value."""
    result = await bidi_session.script.evaluate(
        expression="Promise.resolve('SOME_RESOLVED_RESULT')",
        await_promise=True,
        target=ContextTarget(top_context["context"]),
    )

    assert result == {"type": "string", "value": "SOME_RESOLVED_RESULT"}
+
+
@pytest.mark.asyncio
async def test_await_resolve_array(bidi_session, top_context):
    """Arrays serialize one level deep; nested arrays stay opaque."""
    result = await bidi_session.script.evaluate(
        expression="Promise.resolve([1, 'text', true, ['will not be serialized']])",
        target=ContextTarget(top_context["context"]),
        await_promise=True,
    )

    expected_items = [
        {"type": "number", "value": 1},
        {"type": "string", "value": "text"},
        {"type": "boolean", "value": True},
        {"type": "array"},  # nested array is not serialized further
    ]
    assert result == {"type": "array", "value": expected_items}
+
+
@pytest.mark.asyncio
async def test_await_resolve_date(bidi_session, top_context):
    """Dates resolve to their ISO-8601 string representation."""
    result = await bidi_session.script.evaluate(
        expression="Promise.resolve(new Date(0))",
        target=ContextTarget(top_context["context"]),
        await_promise=True,
    )

    assert result == {"type": "date", "value": "1970-01-01T00:00:00.000Z"}
+
+
@pytest.mark.asyncio
async def test_await_resolve_map(bidi_session, top_context):
    """Maps serialize one level deep; nested maps stay opaque."""
    result = await bidi_session.script.evaluate(
        expression="""
        Promise.resolve(
            new Map([
                ['key1', 'value1'],
                [2, new Date(0)],
                ['key3', new Map([['key4', 'not_serialized']])]
            ])
        )""",
        await_promise=True,
        target=ContextTarget(top_context["context"]),
    )

    expected_entries = [
        ["key1", {"type": "string", "value": "value1"}],
        [
            {"type": "number", "value": 2},
            {"type": "date", "value": "1970-01-01T00:00:00.000Z"},
        ],
        ["key3", {"type": "map"}],  # nested map is not serialized further
    ]
    assert result == {"type": "map", "value": expected_entries}
+
+
@pytest.mark.parametrize(
    "expression, expected, type",
    [
        ("undefined", None, "undefined"),
        ("null", None, "null"),
        ('"text"', "text", "string"),
        ("42", 42, "number"),
        ("Number.NaN", "NaN", "number"),
        ("-0", "-0", "number"),
        ("Infinity", "Infinity", "number"),
        ("-Infinity", "-Infinity", "number"),
        ("true", True, "boolean"),
        ("false", False, "boolean"),
        ("42n", "42", "bigint"),
    ],
)
@pytest.mark.asyncio
async def test_await_resolve_primitive(
    bidi_session, top_context, expression, expected, type
):
    """Primitives resolve to their serialized form; valueless types omit `value`."""
    result = await bidi_session.script.evaluate(
        expression=f"Promise.resolve({expression})",
        target=ContextTarget(top_context["context"]),
        await_promise=True,
    )

    serialized = {"type": type}
    if expected is not None:
        serialized["value"] = expected
    assert result == serialized
+
+
@pytest.mark.asyncio
async def test_await_resolve_regexp(bidi_session, top_context):
    """RegExps resolve to a pattern/flags pair."""
    result = await bidi_session.script.evaluate(
        expression="Promise.resolve(/test/i)",
        target=ContextTarget(top_context["context"]),
        await_promise=True,
    )

    assert result == {
        "type": "regexp",
        "value": {"pattern": "test", "flags": "i"},
    }
+
+
@pytest.mark.asyncio
async def test_await_resolve_set(bidi_session, top_context):
    """Sets serialize one level deep; nested sets stay opaque."""
    result = await bidi_session.script.evaluate(
        expression="""
        Promise.resolve(
            new Set([
                'value1',
                2,
                true,
                new Date(0),
                new Set([-1, 'not serialized'])
            ])
        )""",
        await_promise=True,
        target=ContextTarget(top_context["context"]),
    )

    expected_items = [
        {"type": "string", "value": "value1"},
        {"type": "number", "value": 2},
        {"type": "boolean", "value": True},
        {"type": "date", "value": "1970-01-01T00:00:00.000Z"},
        {"type": "set"},  # nested set is not serialized further
    ]
    assert result == {"type": "set", "value": expected_items}
+
+
@pytest.mark.asyncio
async def test_no_await_promise_rejected(bidi_session, top_context):
    """Without awaiting, a rejected promise is returned as an opaque promise."""
    result = await bidi_session.script.evaluate(
        expression="Promise.reject('SOME_REJECTED_RESULT')",
        await_promise=False,
        target=ContextTarget(top_context["context"]),
    )

    recursive_compare({"type": "promise"}, result)
+
+
@pytest.mark.asyncio
async def test_no_await_promise_resolved(bidi_session, top_context):
    """Without awaiting, a resolved promise is returned as an opaque promise."""
    result = await bidi_session.script.evaluate(
        expression="Promise.resolve('SOME_RESOLVED_RESULT')",
        await_promise=False,
        target=ContextTarget(top_context["context"]),
    )

    recursive_compare({"type": "promise"}, result)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/evaluate.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/evaluate.py
new file mode 100644
index 0000000000..5a8cf61a17
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/evaluate.py
@@ -0,0 +1,95 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget, RealmTarget
+
+from ... import recursive_compare
+
+
@pytest.mark.asyncio
async def test_eval(bidi_session, top_context):
    """A simple arithmetic expression evaluates to a serialized number."""
    result = await bidi_session.script.evaluate(
        expression="1 + 2",
        await_promise=True,
        target=ContextTarget(top_context["context"]),
    )

    assert result == {"type": "number", "value": 3}
+
+
@pytest.mark.asyncio
async def test_interact_with_dom(bidi_session, top_context):
    """Expressions can read DOM state such as window.location."""
    result = await bidi_session.script.evaluate(
        expression="'window.location.href: ' + window.location.href",
        await_promise=True,
        target=ContextTarget(top_context["context"]),
    )

    assert result == {
        "type": "string",
        "value": "window.location.href: about:blank",
    }
+
+
@pytest.mark.asyncio
async def test_target_realm(bidi_session, default_realm):
    """A RealmTarget evaluates in that realm and state persists across calls."""
    expected = {"realm": default_realm, "result": {"type": "number", "value": 3}}

    write_result = await bidi_session.script.evaluate(
        raw_result=True,
        expression="window.foo = 3",
        target=RealmTarget(default_realm),
        await_promise=True,
    )
    recursive_compare(expected, write_result)

    read_result = await bidi_session.script.evaluate(
        raw_result=True,
        expression="window.foo",
        target=RealmTarget(default_realm),
        await_promise=True,
    )
    recursive_compare(expected, read_result)
+
+
@pytest.mark.asyncio
async def test_different_target_realm(bidi_session):
    """Evaluations in different realms do not share global state."""
    await bidi_session.browsing_context.create(type_hint="tab")

    realms = await bidi_session.script.get_realms()
    first_realm = realms[0]["realm"]
    second_realm = realms[1]["realm"]
    assert first_realm != second_realm

    # Write a different value into each realm's global scope.
    for realm, expression in (
        (first_realm, "window.foo = 3"),
        (second_realm, "window.foo = 5"),
    ):
        await bidi_session.script.evaluate(
            raw_result=True,
            expression=expression,
            target=RealmTarget(realm),
            await_promise=True,
        )

    # Read both back and check each realm kept its own value.
    for realm, value in ((first_realm, 3), (second_realm, 5)):
        read_back = await bidi_session.script.evaluate(
            raw_result=True,
            expression="window.foo",
            target=RealmTarget(realm),
            await_promise=True,
        )
        recursive_compare(
            {"realm": realm, "result": {"type": "number", "value": value}},
            read_back,
        )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/exception_details.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/exception_details.py
new file mode 100644
index 0000000000..ac4b8b6944
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/exception_details.py
@@ -0,0 +1,183 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException
+
+from ... import any_int, any_string, recursive_compare
+from .. import any_stack_trace
+
+
@pytest.mark.asyncio
async def test_invalid_script(bidi_session, top_context):
    """Syntactically invalid scripts report a full error with stack trace."""
    with pytest.raises(ScriptEvaluateResultException) as excinfo:
        await bidi_session.script.evaluate(
            expression="))) !!@@## some invalid JS script (((",
            await_promise=True,
            target=ContextTarget(top_context["context"]),
        )

    expected = {
        "realm": any_string,
        "exceptionDetails": {
            "columnNumber": any_int,
            "exception": {"type": "error"},
            "lineNumber": any_int,
            "stackTrace": any_stack_trace,
            "text": any_string,
        },
    }
    recursive_compare(expected, excinfo.value.result)
+
+
@pytest.mark.asyncio
@pytest.mark.parametrize("await_promise", [True, False])
@pytest.mark.parametrize(
    "expression, expected",
    [
        ("undefined", {"type": "undefined"}),
        ("null", {"type": "null"}),
        ("'foobar'", {"type": "string", "value": "foobar"}),
        ("'2'", {"type": "string", "value": "2"}),
        ("Number.NaN", {"type": "number", "value": "NaN"}),
        ("-0", {"type": "number", "value": "-0"}),
        ("Infinity", {"type": "number", "value": "Infinity"}),
        ("-Infinity", {"type": "number", "value": "-Infinity"}),
        ("3", {"type": "number", "value": 3}),
        ("1.4", {"type": "number", "value": 1.4}),
        ("true", {"type": "boolean", "value": True}),
        ("false", {"type": "boolean", "value": False}),
        ("42n", {"type": "bigint", "value": "42"}),
        ("(Symbol('foo'))", {"type": "symbol"}),
        (
            "[1, 'foo', true, new RegExp(/foo/g), [1]]",
            {
                "type": "array",
                "value": [
                    {"type": "number", "value": 1},
                    {"type": "string", "value": "foo"},
                    {"type": "boolean", "value": True},
                    {
                        "type": "regexp",
                        "value": {"pattern": "foo", "flags": "g"},
                    },
                    {"type": "array"},
                ],
            },
        ),
        (
            "({'foo': {'bar': 'baz'}, 'qux': 'quux'})",
            {
                "type": "object",
                "value": [
                    ["foo", {"type": "object"}],
                    ["qux", {"type": "string", "value": "quux"}],
                ],
            },
        ),
        ("(()=>{})", {"type": "function"}),
        ("(function(){})", {"type": "function"}),
        ("(async ()=>{})", {"type": "function"}),
        ("(async function(){})", {"type": "function"}),
        (
            "new RegExp(/foo/g)",
            {"type": "regexp", "value": {"pattern": "foo", "flags": "g"}},
        ),
        (
            "new Date(1654004849000)",
            {"type": "date", "value": "2022-05-31T13:47:29.000Z"},
        ),
        (
            "new Map([[1, 2], ['foo', 'bar'], [true, false], ['baz', [1]]])",
            {
                "type": "map",
                "value": [
                    [
                        {"type": "number", "value": 1},
                        {"type": "number", "value": 2},
                    ],
                    ["foo", {"type": "string", "value": "bar"}],
                    [
                        {"type": "boolean", "value": True},
                        {"type": "boolean", "value": False},
                    ],
                    ["baz", {"type": "array"}],
                ],
            },
        ),
        (
            "new Set([1, 'foo', true, [1], new Map([[1,2]])])",
            {
                "type": "set",
                "value": [
                    {"type": "number", "value": 1},
                    {"type": "string", "value": "foo"},
                    {"type": "boolean", "value": True},
                    {"type": "array"},
                    {"type": "map"},
                ],
            },
        ),
        ("new WeakMap()", {"type": "weakmap"}),
        ("new WeakSet()", {"type": "weakset"}),
        ("new Error('SOME_ERROR_TEXT')", {"type": "error"}),
        # TODO(sadym): add `iterator` test.
        # TODO(sadym): add `generator` test.
        # TODO(sadym): add `proxy` test.
        ("Promise.resolve()", {"type": "promise"}),
        ("new Int32Array()", {"type": "typedarray"}),
        ("new ArrayBuffer()", {"type": "arraybuffer"}),
        (
            "document.createElement('div')",
            {
                "type": "node",
                "value": {
                    "attributes": {},
                    "childNodeCount": 0,
                    "children": [],
                    "localName": "div",
                    "namespaceURI": "http://www.w3.org/1999/xhtml",
                    "nodeType": 1,
                },
            },
        ),
        ("window", {"type": "window"}),
    ],
)
async def test_exception_details(bidi_session, top_context, await_promise, expression, expected):
    """Thrown values and rejection reasons serialize like regular results.

    Fix: the original stacked `@pytest.mark.asyncio` twice (before and after
    the parametrize tables); the redundant second marker was removed.
    """
    if await_promise:
        expression = f"Promise.reject({expression})"
    else:
        expression = f"throw {expression}"

    with pytest.raises(ScriptEvaluateResultException) as exception:
        await bidi_session.script.evaluate(
            expression=expression,
            target=ContextTarget(top_context["context"]),
            await_promise=await_promise,
        )

    recursive_compare(
        {
            "realm": any_string,
            "exceptionDetails": {
                "columnNumber": any_int,
                "exception": expected,
                "lineNumber": any_int,
                "stackTrace": any_stack_trace,
                "text": any_string,
            },
        },
        exception.value.result,
    )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/internal_id.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/internal_id.py
new file mode 100644
index 0000000000..98742ef102
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/internal_id.py
@@ -0,0 +1,65 @@
+import pytest
+
+from ... import recursive_compare, any_string
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "return_structure, result_type",
+ [
+ ("[data, data]", "array"),
+ ("new Map([['foo', data],['bar', data]])", "map"),
+ ("({ 'foo': data, 'bar': data })", "object"),
+ ],
+)
+@pytest.mark.parametrize(
+ "expression, type",
+ [
+ ("[1]", "array"),
+ ("new Map([[true, false]])", "map"),
+ ("new Set(['baz'])", "set"),
+ ("{ baz: 'qux' }", "object"),
+ ],
+)
+async def test_remote_values_with_internal_id(
+ evaluate, return_structure, result_type, expression, type
+):
+ result = await evaluate(f"{{const data = {expression}; {return_structure}}}")
+ result_value = result["value"]
+
+ assert len(result_value) == 2
+
+ if result_type == "array":
+ value = [
+ {"type": type, "internalId": any_string},
+ {"type": type, "internalId": any_string},
+ ]
+ internalId1 = result_value[0]["internalId"]
+ internalId2 = result_value[1]["internalId"]
+ else:
+ value = [
+ ["foo", {"type": type, "internalId": any_string}],
+ ["bar", {"type": type, "internalId": any_string}],
+ ]
+ internalId1 = result_value[0][1]["internalId"]
+ internalId2 = result_value[1][1]["internalId"]
+
+ # Make sure that the same duplicated objects have the same internal ids
+ assert internalId1 == internalId2
+
+ recursive_compare(value, result_value)
+
+
+@pytest.mark.asyncio
+async def test_different_remote_values_have_unique_internal_ids(evaluate):
+ result = await evaluate(
+ "{const obj1 = [1]; const obj2 = {'foo': 'bar'}; [obj1, obj2, obj1, obj2]}"
+ )
+
+ assert len(result["value"]) == 4
+
+ internalId1 = result["value"][0]["internalId"]
+ internalId2 = result["value"][1]["internalId"]
+
+ # Make sure that different duplicated objects have different internal ids
+ assert internalId1 != internalId2
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid.py
new file mode 100644
index 0000000000..b02989edce
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid.py
@@ -0,0 +1,86 @@
+import pytest
+import webdriver.bidi.error as error
+
+from webdriver.bidi.modules.script import ContextTarget, RealmTarget
+
+pytestmark = pytest.mark.asyncio
+
+
+@pytest.mark.parametrize("target", [None, False, "foo", 42, {}, []])
+async def test_params_target_invalid_type(bidi_session, target):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ target=target,
+ await_promise=True)
+
+
+@pytest.mark.parametrize("context", [None, False, 42, {}, []])
+async def test_params_context_invalid_type(bidi_session, context):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ target=ContextTarget(context),
+ await_promise=True)
+
+
+@pytest.mark.parametrize("sandbox", [False, 42, {}, []])
+async def test_params_sandbox_invalid_type(bidi_session, top_context, sandbox):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"], sandbox),
+ await_promise=True)
+
+
+async def test_params_context_unknown(bidi_session):
+ with pytest.raises(error.NoSuchFrameException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ target=ContextTarget("_UNKNOWN_"),
+ await_promise=True)
+
+
+@pytest.mark.parametrize("realm", [None, False, 42, {}, []])
+async def test_params_realm_invalid_type(bidi_session, realm):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ target=RealmTarget(realm),
+ await_promise=True)
+
+
+async def test_params_realm_unknown(bidi_session):
+ with pytest.raises(error.NoSuchFrameException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ target=RealmTarget("_UNKNOWN_"),
+ await_promise=True)
+
+
+@pytest.mark.parametrize("expression", [None, False, 42, {}, []])
+async def test_params_expression_invalid_type(bidi_session, top_context, expression):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=True)
+
+
+@pytest.mark.parametrize("await_promise", [None, "False", 0, 42, {}, []])
+async def test_params_await_promise_invalid_type(bidi_session, top_context, await_promise):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ await_promise=await_promise,
+ target=ContextTarget(top_context["context"]))
+
+
+@pytest.mark.parametrize("result_ownership", [False, "_UNKNOWN_", 42, {}, []])
+async def test_params_result_ownership_invalid_value(bidi_session, top_context, result_ownership):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ result_ownership=result_ownership,
+ target=ContextTarget(top_context["context"]),
+ await_promise=True)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid_tentative.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid_tentative.py
new file mode 100644
index 0000000000..e98a697c80
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/invalid_tentative.py
@@ -0,0 +1,38 @@
+import pytest
+import webdriver.bidi.error as error
+
+from webdriver.bidi.modules.script import ContextTarget
+
+pytestmark = pytest.mark.asyncio
+
+
+# The following tests are marked as tentative until
+# https://github.com/w3c/webdriver-bidi/issues/274 is resolved.
+async def test_params_target_invalid_value(bidi_session, top_context):
+ result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"]),
+ await_promise=True,
+ )
+
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ target={"context": top_context["context"], "realm": result["realm"]},
+ await_promise=True,
+ )
+
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.evaluate(
+ expression="1 + 2",
+ target={"sandbox": "foo", "realm": result["realm"]},
+ await_promise=True,
+ )
+
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.call_function(
+ function_declaration="1 + 2",
+ target={"sandbox": "bar"},
+ await_promise=True,
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result.py
new file mode 100644
index 0000000000..0e1c98e3bb
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result.py
@@ -0,0 +1,140 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ ("undefined", {"type": "undefined"}),
+ ("null", {"type": "null"}),
+ ("'foobar'", {"type": "string", "value": "foobar"}),
+ ("'2'", {"type": "string", "value": "2"}),
+ ("Number.NaN", {"type": "number", "value": "NaN"}),
+ ("-0", {"type": "number", "value": "-0"}),
+ ("Infinity", {"type": "number", "value": "Infinity"}),
+ ("-Infinity", {"type": "number", "value": "-Infinity"}),
+ ("3", {"type": "number", "value": 3}),
+ ("1.4", {"type": "number", "value": 1.4}),
+ ("true", {"type": "boolean", "value": True}),
+ ("false", {"type": "boolean", "value": False}),
+ ("42n", {"type": "bigint", "value": "42"}),
+ ],
+)
+async def test_primitive_values(bidi_session, top_context, expression, expected):
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=True,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ ("(Symbol('foo'))", {"type": "symbol", },),
+ (
+ "[1, 'foo', true, new RegExp(/foo/g), [1]]",
+ {
+ "type": "array",
+ "value": [
+ {"type": "number", "value": 1},
+ {"type": "string", "value": "foo"},
+ {"type": "boolean", "value": True},
+ {
+ "type": "regexp",
+ "value": {
+ "pattern": "foo",
+ "flags": "g",
+ },
+ },
+ {"type": "array"},
+ ],
+ },
+ ),
+ (
+ "({'foo': {'bar': 'baz'}, 'qux': 'quux'})",
+ {
+ "type": "object",
+ "value": [
+ ["foo", {"type": "object"}],
+ ["qux", {"type": "string", "value": "quux"}],
+ ],
+ },
+ ),
+ ("(()=>{})", {"type": "function", },),
+ ("(function(){})", {"type": "function", },),
+ ("(async ()=>{})", {"type": "function", },),
+ ("(async function(){})", {"type": "function", },),
+ (
+ "new RegExp(/foo/g)",
+ {
+ "type": "regexp",
+ "value": {
+ "pattern": "foo",
+ "flags": "g",
+ },
+ },
+ ),
+ (
+ "new Date(1654004849000)",
+ {
+ "type": "date",
+ "value": "2022-05-31T13:47:29.000Z",
+ },
+ ),
+ (
+ "new Map([[1, 2], ['foo', 'bar'], [true, false], ['baz', [1]]])",
+ {
+ "type": "map",
+ "value": [
+ [
+ {"type": "number", "value": 1},
+ {"type": "number", "value": 2},
+ ],
+ ["foo", {"type": "string", "value": "bar"}],
+ [
+ {"type": "boolean", "value": True},
+ {"type": "boolean", "value": False},
+ ],
+ ["baz", {"type": "array"}],
+ ],
+ },
+ ),
+ (
+ "new Set([1, 'foo', true, [1], new Map([[1,2]])])",
+ {
+ "type": "set",
+ "value": [
+ {"type": "number", "value": 1},
+ {"type": "string", "value": "foo"},
+ {"type": "boolean", "value": True},
+ {"type": "array"},
+ {"type": "map"},
+ ],
+ },
+ ),
+ ("new WeakMap()", {"type": "weakmap", },),
+ ("new WeakSet()", {"type": "weakset", },),
+ ("new Error('SOME_ERROR_TEXT')", {"type": "error"},),
+ # TODO(sadym): add `iterator` test.
+ # TODO(sadym): add `generator` test.
+ # TODO(sadym): add `proxy` test.
+ ("Promise.resolve()", {"type": "promise", },),
+ ("new Int32Array()", {"type": "typedarray", },),
+ ("new ArrayBuffer()", {"type": "arraybuffer", },),
+ ("window", {"type": "window", },),
+ ("new URL('https://example.com')", {"type": "object", },),
+ ]
+)
+async def test_remote_values(bidi_session, top_context, expression, expected):
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_node.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_node.py
new file mode 100644
index 0000000000..d8c9f29892
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_node.py
@@ -0,0 +1,586 @@
+import pytest
+from webdriver.bidi.modules.script import ContextTarget
+
+page_data = """
+ <div id="deep"><p><span></span></p><br/></div>
+ <div id="text-node"><p></p>Lorem</div>
+ <br/>
+ <svg id="foo"></svg>
+ <div id="comment"><!-- Comment --></div>
+ <script>
+ var svg = document.querySelector("svg");
+ svg.setAttributeNS("http://www.w3.org/2000/svg", "svg:foo", "bar");
+ </script>
+"""
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ ( # basic
+ """
+ document.querySelector("br")
+ """,
+ {
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 0,
+ "children": [],
+ "localName": "br",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1,
+ },
+ },
+ ),
+ ( # attributes
+ """
+ document.querySelector("svg")
+ """,
+ {
+ "type": "node",
+ "value": {
+ "attributes": {
+ "id": "foo",
+ "svg:foo": "bar",
+ },
+ "childNodeCount": 0,
+ "children": [],
+ "localName": "svg",
+ "namespaceURI": "http://www.w3.org/2000/svg",
+ "nodeType": 1,
+ },
+ },
+ ),
+ ( # all children including non-element nodes
+ """
+ document.querySelector("div#text-node")
+ """,
+ {
+ "type": "node",
+ "value": {
+ "attributes": {"id": "text-node"},
+ "childNodeCount": 2,
+ "children": [{
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 0,
+ "children": None,
+ "localName": "p",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1
+ }
+ }, {
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": None,
+ "nodeType": 3,
+ "nodeValue": "Lorem",
+ }
+ }],
+ "localName": "div",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1,
+ },
+ },
+ ),
+ ( # children limited due to max depth
+ """
+ document.querySelector("div#deep")
+ """,
+ {
+ "type": "node",
+ "value": {
+ "attributes": {"id": "deep"},
+ "childNodeCount": 2,
+ "children": [{
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 1,
+ "children": None,
+ "localName": "p",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1
+ }
+ }, {
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 0,
+ "children": None,
+ "localName": "br",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1
+ }
+ }],
+ "localName": "div",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1,
+ },
+ },
+ ),
+ ( # not connected
+ """
+ document.createElement("div")
+ """,
+ {
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 0,
+ "children": [],
+ "localName": "div",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1,
+ },
+ },
+ ),
+ ], ids=[
+ "basic",
+ "attributes",
+ "all_children",
+ "children_max_depth",
+ "not_connected",
+ ]
+)
+async def test_element_node(bidi_session, inline, top_context, expression, expected):
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=inline(page_data), wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ (
+ """
+ document.querySelector("svg").attributes[0]
+ """,
+ {
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": [],
+ "localName": "id",
+ "namespaceURI": None,
+ "nodeType": 2,
+ "nodeValue": "foo",
+ },
+ },
+ ), (
+ """
+ document.querySelector("svg").attributes[1]
+ """,
+ {
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": [],
+ "localName": "foo",
+ "namespaceURI": "http://www.w3.org/2000/svg",
+ "nodeType": 2,
+ "nodeValue": "bar",
+ },
+ },
+ ),
+ ], ids=[
+ "basic",
+ "namespace",
+ ]
+)
+async def test_attribute_node(bidi_session, inline, top_context, expression, expected):
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=inline(page_data), wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ (
+ """
+ document.querySelector("div#text-node").childNodes[1]
+ """,
+ {
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": [],
+ "nodeType": 3,
+ "nodeValue": "Lorem",
+ }
+ }
+ ),
+ ], ids=[
+ "basic",
+ ]
+)
+async def test_text_node(bidi_session, inline, top_context, expression, expected):
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=inline(page_data), wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ (
+ """
+ document.querySelector("foo").childNodes[1]
+ """,
+ {
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": [],
+ "nodeType": 4,
+ "nodeValue": " < > & ",
+ }
+ }
+ ),
+ ], ids=[
+ "basic",
+ ]
+)
+async def test_cdata_node(bidi_session, inline, new_tab, expression, expected):
+ xml_page = inline("""<foo>CDATA section: <![CDATA[ < > & ]]>.</foo>""", doctype="xml")
+
+ await bidi_session.browsing_context.navigate(
+ context=new_tab['context'], url=xml_page, wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(new_tab["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ (
+ """
+ document.createProcessingInstruction("xml-stylesheet", "href='foo.css'")
+ """,
+ {
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": [],
+ "nodeType": 7,
+ "nodeValue": "href='foo.css'",
+ }
+ }
+ ),
+ ], ids=[
+ "basic",
+ ]
+)
+async def test_processing_instruction_node(
+ bidi_session, inline, new_tab, expression, expected
+):
+ xml_page = inline("""<foo></foo>""", doctype="xml")
+
+ await bidi_session.browsing_context.navigate(
+ context=new_tab['context'], url=xml_page, wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(new_tab["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ (
+ """
+ document.querySelector("div#comment").childNodes[0]
+ """,
+ {
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": [],
+ "nodeType": 8,
+ "nodeValue": " Comment ",
+ }
+ }
+ ),
+ ], ids=[
+ "basic",
+ ]
+)
+async def test_comment_node(bidi_session, inline, top_context, expression, expected):
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=inline(page_data), wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ (
+ """
+ document
+ """,
+ {
+ "type": "node",
+ "value": {
+ "childNodeCount": 2,
+ "children": [{
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": None,
+ "nodeType": 10
+ }
+ }, {
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 2,
+ "children": None,
+ "localName": "html",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1
+ }
+ }],
+ "nodeType": 9
+ }
+ }
+ ),
+ ], ids=[
+ "basic",
+ ]
+)
+async def test_document_node(bidi_session, inline, top_context, expression, expected):
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=inline(page_data), wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ (
+ """
+ document.doctype
+ """,
+ {
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": [],
+ "nodeType": 10,
+ }
+ }
+ ),
+ ], ids=[
+ "basic",
+ ]
+)
+async def test_doctype_node(bidi_session, inline, top_context, expression, expected):
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=inline(page_data), wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ (
+ """
+ new DocumentFragment();
+ """,
+ {
+ "type": "node",
+ "value": {
+ "childNodeCount": 0,
+ "children": [],
+ "nodeType": 11,
+ }
+ }
+ ),
+ ], ids=[
+ "basic",
+ ]
+)
+async def test_document_fragment_node(
+ bidi_session, inline, top_context, expression, expected
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=inline(page_data), wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
+
+
+@pytest.mark.asyncio
+async def test_node_within_object(bidi_session, inline, top_context):
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=inline(page_data), wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression="""({"elem": document.querySelector("span")})""",
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == {
+ "type": "object",
+ "value": [
+ ["elem", {
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 0,
+ "children": None,
+ "localName": "span",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1
+ }
+ }]
+ ]
+ }
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize(
+ "expression, expected",
+ [
+ (
+ "document.getElementsByTagName('span')",
+ {
+ "type": "htmlcollection",
+ "value": [
+ {
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 0,
+ "children": None,
+ "localName": "span",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1
+ }
+ },
+ ]
+ }
+ ),
+ (
+ "document.querySelectorAll('span')",
+ {
+ "type": "nodelist",
+ "value": [
+ {
+ "type": "node",
+ "value": {
+ "attributes": {},
+ "childNodeCount": 0,
+ "children": None,
+ "localName": "span",
+ "namespaceURI": "http://www.w3.org/1999/xhtml",
+ "nodeType": 1
+ }
+ },
+ ]
+ }
+ ),
+ ], ids=[
+ "htmlcollection",
+ "nodelist"
+ ]
+)
+async def test_node_within_dom_collection(
+ bidi_session,
+ inline,
+ top_context,
+ expression,
+ expected
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context['context'], url=inline(page_data), wait="complete"
+ )
+
+ result = await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ assert result == expected
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_ownership.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_ownership.py
new file mode 100644
index 0000000000..77e537bfe0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/result_ownership.py
@@ -0,0 +1,60 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException
+from .. import assert_handle
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("result_ownership, should_contain_handle",
+ [("root", True), ("none", False), (None, False)])
+async def test_throw_exception(bidi_session, top_context, result_ownership, should_contain_handle):
+ with pytest.raises(ScriptEvaluateResultException) as exception:
+ await bidi_session.script.evaluate(
+ expression='throw {a:1}',
+ await_promise=False,
+ result_ownership=result_ownership,
+ target=ContextTarget(top_context["context"]))
+
+ assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("result_ownership, should_contain_handle",
+ [("root", True), ("none", False), (None, False)])
+async def test_invalid_script(bidi_session, top_context, result_ownership, should_contain_handle):
+ with pytest.raises(ScriptEvaluateResultException) as exception:
+ await bidi_session.script.evaluate(
+ expression="))) !!@@## some invalid JS script (((",
+ await_promise=False,
+ result_ownership=result_ownership,
+ target=ContextTarget(top_context["context"]))
+
+ assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("result_ownership, should_contain_handle",
+ [("root", True), ("none", False), (None, False)])
+async def test_rejected_promise(bidi_session, top_context, result_ownership, should_contain_handle):
+ with pytest.raises(ScriptEvaluateResultException) as exception:
+ await bidi_session.script.evaluate(
+ expression="Promise.reject({a:1})",
+ await_promise=True,
+ result_ownership=result_ownership,
+ target=ContextTarget(top_context["context"]))
+
+ assert_handle(exception.value.result["exceptionDetails"]["exception"], should_contain_handle)
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("await_promise", [True, False])
+@pytest.mark.parametrize("result_ownership, should_contain_handle",
+ [("root", True), ("none", False), (None, False)])
+async def test_return_value(bidi_session, top_context, await_promise, result_ownership, should_contain_handle):
+ result = await bidi_session.script.evaluate(
+ expression="Promise.resolve({a:1})",
+ await_promise=await_promise,
+ result_ownership=result_ownership,
+ target=ContextTarget(top_context["context"]))
+
+ assert_handle(result, should_contain_handle)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/sandbox.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/sandbox.py
new file mode 100644
index 0000000000..ba4eb732d1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/sandbox.py
@@ -0,0 +1,182 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget, RealmTarget, ScriptEvaluateResultException
+
+from ... import any_int, any_string, recursive_compare
+from .. import any_stack_trace
+
+
+@pytest.mark.asyncio
+async def test_sandbox(bidi_session, new_tab):
+ # Make changes in window
+ await bidi_session.script.evaluate(
+ expression="window.foo = 1",
+ target=ContextTarget(new_tab["context"]),
+ await_promise=True,
+ )
+
+ # Check that changes are not present in sandbox
+ result_in_sandbox = await bidi_session.script.evaluate(
+ expression="window.foo",
+ target=ContextTarget(new_tab["context"], "sandbox"),
+ await_promise=True,
+ )
+ assert result_in_sandbox == {"type": "undefined"}
+
+ # Make changes in sandbox
+ await bidi_session.script.evaluate(
+ expression="window.bar = 1",
+ target=ContextTarget(new_tab["context"], "sandbox"),
+ await_promise=True,
+ )
+
+ # Make sure that changes are present in sandbox
+ result_in_sandbox = await bidi_session.script.evaluate(
+ expression="window.bar",
+ target=ContextTarget(new_tab["context"], "sandbox"),
+ await_promise=True,
+ )
+ assert result_in_sandbox == {"type": "number", "value": 1}
+
+ # Make sure that changes didn't leak from sandbox
+ result = await bidi_session.script.evaluate(
+ expression="window.bar",
+ target=ContextTarget(new_tab["context"]),
+ await_promise=True,
+ )
+ assert result == {"type": "undefined"}
+
+
+@pytest.mark.asyncio
+async def test_sandbox_with_empty_name(bidi_session, new_tab):
+ # An empty string as a `sandbox` means the default realm should be used.
+ await bidi_session.script.evaluate(
+ expression="window.foo = 'bar'",
+ target=ContextTarget(new_tab["context"], ""),
+ await_promise=True,
+ )
+
+ # Make sure that we can find the sandbox with the empty name.
+ result = await bidi_session.script.evaluate(
+ expression="window.foo",
+ target=ContextTarget(new_tab["context"], ""),
+ await_promise=True,
+ )
+ assert result == {"type": "string", "value": "bar"}
+
+ # Make sure that we can find the value in the default realm.
+ result = await bidi_session.script.evaluate(
+ expression="window.foo",
+ target=ContextTarget(new_tab["context"]),
+ await_promise=True,
+ )
+ assert result == {"type": "string", "value": "bar"}
+
+
+@pytest.mark.asyncio
+async def test_switch_sandboxes(bidi_session, new_tab):
+ # Test that sandboxes are retained when switching between them
+ await bidi_session.script.evaluate(
+ expression="window.foo = 1",
+ target=ContextTarget(new_tab["context"], "sandbox_1"),
+ await_promise=True,
+ )
+ await bidi_session.script.evaluate(
+ expression="window.foo = 2",
+ target=ContextTarget(new_tab["context"], "sandbox_2"),
+ await_promise=True,
+ )
+
+ result_in_sandbox_1 = await bidi_session.script.evaluate(
+ expression="window.foo",
+ target=ContextTarget(new_tab["context"], "sandbox_1"),
+ await_promise=True,
+ )
+ assert result_in_sandbox_1 == {"type": "number", "value": 1}
+
+ result_in_sandbox_2 = await bidi_session.script.evaluate(
+ expression="window.foo",
+ target=ContextTarget(new_tab["context"], "sandbox_2"),
+ await_promise=True,
+ )
+ assert result_in_sandbox_2 == {"type": "number", "value": 2}
+
+
+@pytest.mark.asyncio
+async def test_sandbox_with_side_effects(bidi_session, new_tab):
+ # Make sure changing the node in sandbox will affect the other sandbox as well
+ await bidi_session.script.evaluate(
+ expression="document.querySelector('body').textContent = 'foo'",
+ target=ContextTarget(new_tab["context"], "sandbox_1"),
+ await_promise=True,
+ )
+ expected_value = {"type": "string", "value": "foo"}
+
+ result_in_sandbox_1 = await bidi_session.script.evaluate(
+ expression="document.querySelector('body').textContent",
+ target=ContextTarget(new_tab["context"], "sandbox_1"),
+ await_promise=True,
+ )
+ assert result_in_sandbox_1 == expected_value
+
+ result_in_sandbox_2 = await bidi_session.script.evaluate(
+ expression="document.querySelector('body').textContent",
+ target=ContextTarget(new_tab["context"], "sandbox_2"),
+ await_promise=True,
+ )
+ assert result_in_sandbox_2 == expected_value
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("await_promise", [True, False])
+async def test_exception_details(bidi_session, new_tab, await_promise):
+ if await_promise:
+ expression = "Promise.reject(1)"
+ else:
+ expression = "throw 1"
+
+ with pytest.raises(ScriptEvaluateResultException) as exception:
+ await bidi_session.script.evaluate(
+ expression=expression,
+ target=ContextTarget(new_tab["context"], "sandbox"),
+ await_promise=await_promise,
+ )
+
+ recursive_compare(
+ {
+ "realm": any_string,
+ "exceptionDetails": {
+ "columnNumber": any_int,
+ "exception": {"type": "number", "value": 1},
+ "lineNumber": any_int,
+ "stackTrace": any_stack_trace,
+ "text": any_string,
+ },
+ },
+ exception.value.result,
+ )
+
+
+@pytest.mark.asyncio
+async def test_target_realm(bidi_session, top_context, default_realm):
+ result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="window.foo = 3",
+ target=ContextTarget(top_context["context"], "sandbox"),
+ await_promise=True,
+ )
+ realm = result["realm"]
+
+ # Make sure that sandbox realm id is different from default
+ assert realm != default_realm
+
+ result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="window.foo",
+ target=RealmTarget(realm),
+ await_promise=True,
+ )
+
+ recursive_compare(
+ {"realm": realm, "result": {"type": "number", "value": 3}}, result
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/strict_mode.py b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/strict_mode.py
new file mode 100644
index 0000000000..bcaebb51f4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/evaluate/strict_mode.py
@@ -0,0 +1,34 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget, ScriptEvaluateResultException
+from ... import any_int, any_string, recursive_compare
+from .. import any_stack_trace, specific_error_response
+
+
+@pytest.mark.asyncio
+async def test_strict_mode(bidi_session, top_context):
+ # As long as there is no `SOME_VARIABLE`, the command should fail in strict mode.
+ with pytest.raises(ScriptEvaluateResultException) as exception:
+ await bidi_session.script.evaluate(
+ expression="'use strict';SOME_VARIABLE=1",
+ target=ContextTarget(top_context["context"]),
+ await_promise=True)
+ recursive_compare(specific_error_response({"type": "error"}), exception.value.result)
+
+ # In non-strict mode, the command should succeed and global `SOME_VARIABLE` should be created.
+ result = await bidi_session.script.evaluate(
+ expression="SOME_VARIABLE=1",
+ target=ContextTarget(top_context["context"]),
+ await_promise=True)
+ assert result == {
+ "type": "number",
+ "value": 1}
+
+ # Access created by the previous command `SOME_VARIABLE`.
+ result = await bidi_session.script.evaluate(
+ expression="'use strict';SOME_VARIABLE=1",
+ target=ContextTarget(top_context["context"]),
+ await_promise=True)
+ assert result == {
+ "type": "number",
+ "value": 1}
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/context.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/context.py
new file mode 100644
index 0000000000..1d765c7b4a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/context.py
@@ -0,0 +1,70 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+from ... import recursive_compare
+
+
+@pytest.mark.asyncio
+async def test_context(
+ bidi_session,
+ test_alt_origin,
+ test_origin,
+ test_page_cross_origin_frame,
+):
+ new_context = await bidi_session.browsing_context.create(type_hint="tab")
+ await bidi_session.browsing_context.navigate(
+ context=new_context["context"],
+ url=test_page_cross_origin_frame,
+ wait="complete",
+ )
+
+ # Evaluate to get realm id
+ new_context_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(new_context["context"]),
+ await_promise=False,
+ )
+
+ result = await bidi_session.script.get_realms(context=new_context["context"])
+
+ recursive_compare(
+ [
+ {
+ "context": new_context["context"],
+ "origin": test_origin,
+ "realm": new_context_result["realm"],
+ "type": "window",
+ },
+ ],
+ result,
+ )
+
+ contexts = await bidi_session.browsing_context.get_tree(root=new_context["context"])
+ assert len(contexts) == 1
+ frames = contexts[0]["children"]
+ assert len(frames) == 1
+ frame_context = frames[0]["context"]
+
+ # Evaluate to get realm id
+ frame_context_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(frame_context),
+ await_promise=False,
+ )
+
+ result = await bidi_session.script.get_realms(context=frame_context)
+
+ recursive_compare(
+ [
+ {
+ "context": frame_context,
+ "origin": test_alt_origin,
+ "realm": frame_context_result["realm"],
+ "type": "window",
+ },
+ ],
+ result,
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/get_realms.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/get_realms.py
new file mode 100644
index 0000000000..4dfce5ab49
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/get_realms.py
@@ -0,0 +1,183 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+from ... import any_string, recursive_compare
+
+PAGE_ABOUT_BLANK = "about:blank"
+
+
+@pytest.mark.asyncio
+async def test_payload_types(bidi_session):
+ result = await bidi_session.script.get_realms()
+
+ recursive_compare(
+ [
+ {
+ "context": any_string,
+ "origin": any_string,
+ "realm": any_string,
+ "type": any_string,
+ }
+ ],
+ result,
+ )
+
+
+@pytest.mark.asyncio
+async def test_realm_is_consistent_when_calling_twice(bidi_session):
+ result = await bidi_session.script.get_realms()
+
+ result_calling_again = await bidi_session.script.get_realms()
+
+ assert result[0]["realm"] == result_calling_again[0]["realm"]
+
+
+@pytest.mark.asyncio
+async def test_realm_is_different_after_reload(bidi_session, top_context):
+ result = await bidi_session.script.get_realms()
+
+ # Reload the page
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete"
+ )
+
+ result_after_reload = await bidi_session.script.get_realms()
+
+ assert result[0]["realm"] != result_after_reload[0]["realm"]
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("type_hint", ["tab", "window"])
+async def test_multiple_top_level_contexts(bidi_session, top_context, type_hint):
+ new_context = await bidi_session.browsing_context.create(type_hint=type_hint)
+ result = await bidi_session.script.get_realms()
+
+ # Evaluate to get realm ids
+ top_context_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+ new_context_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(new_context["context"]),
+ await_promise=False,
+ )
+
+ recursive_compare(
+ [
+ {
+ "context": top_context["context"],
+ "origin": "null",
+ "realm": top_context_result["realm"],
+ "type": "window",
+ },
+ {
+ "context": new_context["context"],
+ "origin": "null",
+ "realm": new_context_result["realm"],
+ "type": "window",
+ },
+ ],
+ result,
+ )
+
+
+@pytest.mark.asyncio
+async def test_iframes(
+ bidi_session,
+ top_context,
+ test_alt_origin,
+ test_origin,
+ test_page_cross_origin_frame,
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"],
+ url=test_page_cross_origin_frame,
+ wait="complete",
+ )
+
+ result = await bidi_session.script.get_realms()
+
+ # Evaluate to get realm id
+ top_context_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"])
+ assert len(contexts) == 1
+ frames = contexts[0]["children"]
+ assert len(frames) == 1
+ frame_context = frames[0]["context"]
+
+ # Evaluate to get realm id
+ frame_context_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(frame_context),
+ await_promise=False,
+ )
+
+ recursive_compare(
+ [
+ {
+ "context": top_context["context"],
+ "origin": test_origin,
+ "realm": top_context_result["realm"],
+ "type": "window",
+ },
+ {
+ "context": frame_context,
+ "origin": test_alt_origin,
+ "realm": frame_context_result["realm"],
+ "type": "window",
+ },
+ ],
+ result,
+ )
+
+ # Clean up origin
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete"
+ )
+
+
+@pytest.mark.asyncio
+async def test_origin(bidi_session, inline, top_context, test_origin):
+ url = inline("<div>foo</div>")
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=url, wait="complete"
+ )
+
+ result = await bidi_session.script.get_realms()
+
+ # Evaluate to get realm id
+ top_context_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ recursive_compare(
+ [
+ {
+ "context": top_context["context"],
+ "origin": test_origin,
+ "realm": top_context_result["realm"],
+ "type": "window",
+ }
+ ],
+ result,
+ )
+
+ # Clean up origin
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete"
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/invalid.py
new file mode 100644
index 0000000000..c15378a6e0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/invalid.py
@@ -0,0 +1,26 @@
+import pytest
+import webdriver.bidi.error as error
+
+pytestmark = pytest.mark.asyncio
+
+
+@pytest.mark.parametrize("context", [False, 42, {}, []])
+async def test_params_context_invalid_type(bidi_session, context):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.get_realms(context=context)
+
+
+async def test_params_context_invalid_value(bidi_session):
+ with pytest.raises(error.NoSuchFrameException):
+ await bidi_session.script.get_realms(context="foo")
+
+
+@pytest.mark.parametrize("type", [False, 42, {}, []])
+async def test_params_type_invalid_type(bidi_session, type):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.get_realms(type=type)
+
+
+async def test_params_type_invalid_value(bidi_session):
+ with pytest.raises(error.InvalidArgumentException):
+ await bidi_session.script.get_realms(type="foo")
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/sandbox.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/sandbox.py
new file mode 100644
index 0000000000..6ce1fee552
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/sandbox.py
@@ -0,0 +1,238 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+from ... import recursive_compare
+
+PAGE_ABOUT_BLANK = "about:blank"
+
+
+@pytest.mark.asyncio
+async def test_sandbox(bidi_session, top_context):
+ evaluate_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ # Create a sandbox
+ evaluate_in_sandbox_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"], "sandbox"),
+ await_promise=False,
+ )
+
+ result = await bidi_session.script.get_realms()
+
+ recursive_compare(
+ [
+ {
+ "context": top_context["context"],
+ "origin": "null",
+ "realm": evaluate_result["realm"],
+ "type": "window",
+ },
+ {
+ "context": top_context["context"],
+ "origin": "null",
+ "realm": evaluate_in_sandbox_result["realm"],
+ "sandbox": "sandbox",
+ "type": "window",
+ },
+ ],
+ result,
+ )
+
+ # Reload to clean up sandboxes
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete"
+ )
+
+
+@pytest.mark.asyncio
+async def test_origin(bidi_session, inline, top_context, test_origin):
+ url = inline("<div>foo</div>")
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=url, wait="complete"
+ )
+
+ evaluate_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ # Create a sandbox
+ evaluate_in_sandbox_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"], "sandbox"),
+ await_promise=False,
+ )
+
+ result = await bidi_session.script.get_realms()
+
+ recursive_compare(
+ [
+ {
+ "context": top_context["context"],
+ "origin": test_origin,
+ "realm": evaluate_result["realm"],
+ "type": "window",
+ },
+ {
+ "context": top_context["context"],
+ "origin": test_origin,
+ "realm": evaluate_in_sandbox_result["realm"],
+ "sandbox": "sandbox",
+ "type": "window",
+ },
+ ],
+ result,
+ )
+
+ # Reload to clean up sandboxes
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete"
+ )
+
+
+@pytest.mark.asyncio
+async def test_type(bidi_session, top_context):
+ evaluate_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ # Create a sandbox
+ evaluate_in_sandbox_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"], "sandbox"),
+ await_promise=False,
+ )
+
+ # Should be extended when more types are supported
+ result = await bidi_session.script.get_realms(type="window")
+
+ recursive_compare(
+ [
+ {
+ "context": top_context["context"],
+ "origin": "null",
+ "realm": evaluate_result["realm"],
+ "type": "window",
+ },
+ {
+ "context": top_context["context"],
+ "origin": "null",
+ "realm": evaluate_in_sandbox_result["realm"],
+ "sandbox": "sandbox",
+ "type": "window",
+ },
+ ],
+ result,
+ )
+
+ # Reload to clean up sandboxes
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=PAGE_ABOUT_BLANK, wait="complete"
+ )
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("type_hint", ["tab", "window"])
+async def test_multiple_top_level_contexts(
+ bidi_session,
+ test_alt_origin,
+ test_origin,
+ test_page_cross_origin_frame,
+ type_hint,
+):
+ new_context = await bidi_session.browsing_context.create(type_hint=type_hint)
+ await bidi_session.browsing_context.navigate(
+ context=new_context["context"],
+ url=test_page_cross_origin_frame,
+ wait="complete",
+ )
+
+ evaluate_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(new_context["context"]),
+ await_promise=False,
+ )
+
+ # Create a sandbox
+ evaluate_in_sandbox_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(new_context["context"], "sandbox"),
+ await_promise=False,
+ )
+
+ result = await bidi_session.script.get_realms(context=new_context["context"])
+ recursive_compare(
+ [
+ {
+ "context": new_context["context"],
+ "origin": test_origin,
+ "realm": evaluate_result["realm"],
+ "type": "window",
+ },
+ {
+ "context": new_context["context"],
+ "origin": test_origin,
+ "realm": evaluate_in_sandbox_result["realm"],
+ "sandbox": "sandbox",
+ "type": "window",
+ },
+ ],
+ result,
+ )
+
+ contexts = await bidi_session.browsing_context.get_tree(root=new_context["context"])
+ assert len(contexts) == 1
+ frames = contexts[0]["children"]
+ assert len(frames) == 1
+ frame_context = frames[0]["context"]
+
+ evaluate_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(frame_context),
+ await_promise=False,
+ )
+
+ # Create a sandbox in iframe
+ evaluate_in_sandbox_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(frame_context, "sandbox"),
+ await_promise=False,
+ )
+
+ result = await bidi_session.script.get_realms(context=frame_context)
+ recursive_compare(
+ [
+ {
+ "context": frame_context,
+ "origin": test_alt_origin,
+ "realm": evaluate_result["realm"],
+ "type": "window",
+ },
+ {
+ "context": frame_context,
+ "origin": test_alt_origin,
+ "realm": evaluate_in_sandbox_result["realm"],
+ "sandbox": "sandbox",
+ "type": "window",
+ },
+ ],
+ result,
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/type.py b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/type.py
new file mode 100644
index 0000000000..7a8b4d43b7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/script/get_realms/type.py
@@ -0,0 +1,34 @@
+import pytest
+
+from webdriver.bidi.modules.script import ContextTarget
+
+from ... import recursive_compare
+
+PAGE_ABOUT_BLANK = "about:blank"
+
+
+@pytest.mark.asyncio
+# Should be extended when more types are supported
+@pytest.mark.parametrize("type", ["window"])
+async def test_type(bidi_session, top_context, type):
+ result = await bidi_session.script.get_realms(type=type)
+
+ # Evaluate to get realm id
+ top_context_result = await bidi_session.script.evaluate(
+ raw_result=True,
+ expression="1 + 2",
+ target=ContextTarget(top_context["context"]),
+ await_promise=False,
+ )
+
+ recursive_compare(
+ [
+ {
+ "context": top_context["context"],
+ "origin": "null",
+ "realm": top_context_result["realm"],
+ "type": type,
+ }
+ ],
+ result,
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/new/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/new/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/new/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/new/connect.py b/testing/web-platform/tests/webdriver/tests/bidi/session/new/connect.py
new file mode 100644
index 0000000000..7118f77ea0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/new/connect.py
@@ -0,0 +1,34 @@
+import pytest
+import websockets
+
+import webdriver
+
+# classic session to enable bidi capability manually
+# Intended to be the first test in this file
+@pytest.mark.asyncio
+@pytest.mark.capabilities({"webSocketUrl": True})
+async def test_websocket_url_connect(session):
+ websocket_url = session.capabilities["webSocketUrl"]
+ async with websockets.connect(websocket_url) as websocket:
+ await websocket.send("Hello world!")
+
+# test bidi_session send
+@pytest.mark.asyncio
+async def test_bidi_session_send(bidi_session, send_blocking_command):
+ await send_blocking_command("session.status", {})
+
+# bidi session following a bidi session with different capabilities
+# to test session recreation
+@pytest.mark.asyncio
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+async def test_bidi_session_with_different_capability(bidi_session,
+ send_blocking_command):
+ await send_blocking_command("session.status", {})
+
+# classic session following a bidi session to test session
+# recreation
+# Intended to be the last test in this file to make sure
+# classic session is not impacted by bidi tests
+@pytest.mark.asyncio
+def test_classic_after_bidi_session(session):
+ assert not isinstance(session, webdriver.bidi.BidiSession)
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/status/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/status/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/status/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/status/status.py b/testing/web-platform/tests/webdriver/tests/bidi/session/status/status.py
new file mode 100644
index 0000000000..eee102fee7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/status/status.py
@@ -0,0 +1,11 @@
+import pytest
+
+
+# Check that session.status can be used. The actual values for the "ready" and
+# "message" properties are implementation specific.
+@pytest.mark.asyncio
+async def test_bidi_session_status(bidi_session, send_blocking_command):
+ response = await send_blocking_command("session.status", {})
+ assert isinstance(response["ready"], bool)
+ assert isinstance(response["message"], str)
+
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/contexts.py b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/contexts.py
new file mode 100644
index 0000000000..874acf63d7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/contexts.py
@@ -0,0 +1,277 @@
+import asyncio
+
+import pytest
+
+from ... import create_console_api_message, recursive_compare
+
+# The basic use case of subscribing to all contexts for a single event
+# is covered by tests for each event in the dedicated folders.
+
+
+@pytest.mark.asyncio
+async def test_subscribe_to_one_context(
+ bidi_session, subscribe_events, top_context, new_tab, wait_for_event
+):
+ # Subscribe for log events to a specific context
+ await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]])
+
+ # Track all received log.entryAdded events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
+
+    # Trigger console event in another context
+ await create_console_api_message(bidi_session, new_tab, "text1")
+
+ assert len(events) == 0
+
+ # Trigger another console event in the observed context
+ on_entry_added = wait_for_event("log.entryAdded")
+ expected_text = await create_console_api_message(bidi_session, top_context, "text2")
+ await on_entry_added
+
+ assert len(events) == 1
+ recursive_compare(
+ {
+ "text": expected_text,
+ },
+ events[0],
+ )
+
+ remove_listener()
+
+
+@pytest.mark.asyncio
+async def test_subscribe_to_one_context_twice(
+ bidi_session, subscribe_events, top_context, wait_for_event
+):
+ # Subscribe twice for log events to a specific context
+ await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]])
+ await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]])
+
+ # Track all received log.entryAdded events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
+
+ # Trigger a console event in the observed context
+ on_entry_added = wait_for_event("log.entryAdded")
+ expected_text = await create_console_api_message(bidi_session, top_context, "text2")
+ await on_entry_added
+
+ assert len(events) == 1
+ recursive_compare(
+ {
+ "text": expected_text,
+ },
+ events[0],
+ )
+
+ assert len(events) == 1
+
+ remove_listener()
+
+
+@pytest.mark.asyncio
+async def test_subscribe_to_one_context_and_then_to_all(
+ bidi_session, subscribe_events, top_context, new_tab, wait_for_event
+):
+ # Subscribe for log events to a specific context
+ await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]])
+
+ # Track all received log.entryAdded events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
+
+    # Trigger console event in another context
+ buffered_event_expected_text = await create_console_api_message(
+ bidi_session, new_tab, "text1"
+ )
+
+ assert len(events) == 0
+
+ # Trigger another console event in the observed context
+ on_entry_added = wait_for_event("log.entryAdded")
+ expected_text = await create_console_api_message(bidi_session, top_context, "text2")
+ await on_entry_added
+
+ assert len(events) == 1
+ recursive_compare(
+ {
+ "text": expected_text,
+ },
+ events[0],
+ )
+
+ events = []
+
+ # Subscribe to all contexts
+ await subscribe_events(events=["log.entryAdded"])
+
+ # Check that we received the buffered event
+ assert len(events) == 1
+ recursive_compare(
+ {
+ "text": buffered_event_expected_text,
+ },
+ events[0],
+ )
+
+    # Trigger events again in each context
+ expected_text = await create_console_api_message(bidi_session, new_tab, "text3")
+ await on_entry_added
+
+ assert len(events) == 2
+ recursive_compare(
+ {
+ "text": expected_text,
+ },
+ events[1],
+ )
+
+ expected_text = await create_console_api_message(bidi_session, top_context, "text4")
+ await on_entry_added
+
+ assert len(events) == 3
+ recursive_compare(
+ {
+ "text": expected_text,
+ },
+ events[2],
+ )
+
+ remove_listener()
+
+
+@pytest.mark.asyncio
+async def test_subscribe_to_all_context_and_then_to_one_again(
+ bidi_session, subscribe_events, top_context, new_tab, wait_for_event
+):
+ # Subscribe to all contexts
+ await subscribe_events(events=["log.entryAdded"])
+ # Subscribe to one of the contexts again
+ await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]])
+
+ # Track all received log.entryAdded events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
+
+ # Trigger console event in the context to which we tried to subscribe twice
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message(bidi_session, top_context, "text1")
+ await on_entry_added
+
+ # Make sure we received only one event
+ assert len(events) == 1
+
+ remove_listener()
+
+
+@pytest.mark.asyncio
+async def test_subscribe_to_top_context_with_iframes(
+ bidi_session,
+ subscribe_events,
+ wait_for_event,
+ top_context,
+ test_page_multiple_frames,
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page_multiple_frames, wait="complete"
+ )
+
+ contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"])
+
+ assert len(contexts[0]["children"]) == 2
+ frame_1 = contexts[0]["children"][0]
+ frame_2 = contexts[0]["children"][1]
+
+ # Subscribe to the top context
+ await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]])
+
+ # Track all received log.entryAdded events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
+
+ # Trigger console event in the first iframe
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message(bidi_session, frame_1, "text1")
+ await on_entry_added
+
+ # Make sure we received the event
+ assert len(events) == 1
+
+ # Trigger console event in the second iframe
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message(bidi_session, frame_2, "text2")
+ await on_entry_added
+
+ # Make sure we received the second event as well
+ assert len(events) == 2
+
+ remove_listener()
+
+
+@pytest.mark.asyncio
+async def test_subscribe_to_child_context(
+ bidi_session,
+ subscribe_events,
+ wait_for_event,
+ top_context,
+ test_page_multiple_frames,
+):
+ await bidi_session.browsing_context.navigate(
+ context=top_context["context"], url=test_page_multiple_frames, wait="complete"
+ )
+
+ contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"])
+
+ assert len(contexts[0]["children"]) == 2
+ frame_1 = contexts[0]["children"][0]
+ frame_2 = contexts[0]["children"][1]
+
+ # Subscribe to the first frame context
+ await subscribe_events(events=["log.entryAdded"], contexts=[frame_1["context"]])
+
+ # Track all received log.entryAdded events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(data)
+
+ remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)
+
+ # Trigger console event in the top context
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message(bidi_session, top_context, "text1")
+ await on_entry_added
+
+ # Make sure we received the event
+ assert len(events) == 1
+
+ # Trigger console event in the second iframe
+ on_entry_added = wait_for_event("log.entryAdded")
+ await create_console_api_message(bidi_session, frame_2, "text2")
+ await on_entry_added
+
+ # Make sure we received the second event as well
+ assert len(events) == 2
+
+ remove_listener()
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/events.py b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/events.py
new file mode 100644
index 0000000000..a4c20365b3
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/events.py
@@ -0,0 +1,138 @@
+import asyncio
+
+import pytest
+
+# The basic use case of subscribing globally for a single event
+# is covered by tests for each event in the dedicated folders.
+
+
+@pytest.mark.asyncio
+async def test_subscribe_to_module(bidi_session, subscribe_events, wait_for_event):
+ # Subscribe to all browsing context events
+ await subscribe_events(events=["browsingContext"])
+
+ # Track all received browsing context events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(method)
+
+ remove_listener_contextCreated = bidi_session.add_event_listener(
+ "browsingContext.contextCreated", on_event
+ )
+ remove_listener_domContentLoaded = bidi_session.add_event_listener(
+ "browsingContext.domContentLoaded", on_event
+ )
+ remove_listener_load = bidi_session.add_event_listener(
+ "browsingContext.load", on_event
+ )
+
+ # Wait for the last event
+ on_entry_added = wait_for_event("browsingContext.load")
+ await bidi_session.browsing_context.create(type_hint="tab")
+ await on_entry_added
+
+ assert len(events) == 3
+
+ remove_listener_contextCreated()
+ remove_listener_domContentLoaded()
+ remove_listener_load()
+
+
+@pytest.mark.asyncio
+async def test_subscribe_to_one_event_and_then_to_module(
+ bidi_session, subscribe_events, wait_for_event
+):
+ # Subscribe to one event
+ await subscribe_events(events=["browsingContext.contextCreated"])
+
+ # Track all received browsing context events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(method)
+
+ remove_listener_contextCreated = bidi_session.add_event_listener(
+ "browsingContext.contextCreated", on_event
+ )
+
+ on_entry_added = wait_for_event("browsingContext.contextCreated")
+ await bidi_session.browsing_context.create(type_hint="tab")
+ await on_entry_added
+
+ assert len(events) == 1
+ assert "browsingContext.contextCreated" in events
+
+ # Subscribe to all browsing context events
+ await subscribe_events(events=["browsingContext"])
+
+ # Clean up the event list
+ events = []
+
+ remove_listener_domContentLoaded = bidi_session.add_event_listener(
+ "browsingContext.domContentLoaded", on_event
+ )
+ remove_listener_load = bidi_session.add_event_listener(
+ "browsingContext.load", on_event
+ )
+
+ # Wait for the last event
+ on_entry_added = wait_for_event("browsingContext.load")
+ await bidi_session.browsing_context.create(type_hint="tab")
+ await on_entry_added
+
+ # Make sure we didn't receive duplicates
+ assert len(events) == 3
+
+ remove_listener_contextCreated()
+ remove_listener_domContentLoaded()
+ remove_listener_load()
+
+
+@pytest.mark.asyncio
+async def test_subscribe_to_module_and_then_to_one_event_again(
+ bidi_session, subscribe_events, wait_for_event
+):
+ # Subscribe to all browsing context events
+ await subscribe_events(events=["browsingContext"])
+
+ # Track all received browsing context events in the events array
+ events = []
+
+ async def on_event(method, data):
+ events.append(method)
+
+ remove_listener_contextCreated = bidi_session.add_event_listener(
+ "browsingContext.contextCreated", on_event
+ )
+ remove_listener_domContentLoaded = bidi_session.add_event_listener(
+ "browsingContext.domContentLoaded", on_event
+ )
+ remove_listener_load = bidi_session.add_event_listener(
+ "browsingContext.load", on_event
+ )
+
+ # Wait for the last event
+ on_entry_added = wait_for_event("browsingContext.load")
+ await bidi_session.browsing_context.create(type_hint="tab")
+ await on_entry_added
+
+ assert len(events) == 3
+
+ # Subscribe to one event again
+ await subscribe_events(events=["browsingContext.contextCreated"])
+
+ # Clean up the event list
+ events = []
+
+ # Wait for the last event
+ on_entry_added = wait_for_event("browsingContext.load")
+ await bidi_session.browsing_context.create(type_hint="tab")
+ await on_entry_added
+
+ # Make sure we didn't receive duplicates
+ assert len(events) == 3
+
+ remove_listener_contextCreated()
+ remove_listener_domContentLoaded()
+ remove_listener_load()
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/invalid.py
new file mode 100644
index 0000000000..c3e03dfaf1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/subscribe/invalid.py
@@ -0,0 +1,153 @@
import asyncio

import pytest
from webdriver.bidi.error import InvalidArgumentException, NoSuchFrameException

from ... import create_console_api_message


@pytest.mark.asyncio
async def test_params_empty(bidi_session, send_blocking_command):
    """The "events" field is mandatory, so empty params must be rejected."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command("session.subscribe", {})


@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, "foo", 42, {}])
async def test_params_events_invalid_type(bidi_session, send_blocking_command, value):
    """"events" must be a list; any other JSON type is invalid."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command("session.subscribe", {"events": value})


@pytest.mark.asyncio
async def test_params_events_empty(bidi_session):
    """Subscribing to an empty list of events succeeds as a no-op."""
    response = await bidi_session.session.subscribe(events=[])
    assert response == {}


@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, 42, [], {}])
async def test_params_events_value_invalid_type(send_blocking_command, value):
    """Each entry of "events" must be a string."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command("session.subscribe", {"events": [value]})


@pytest.mark.asyncio
@pytest.mark.parametrize("value", ["", "foo", "foo.bar", "log.invalidEvent"])
async def test_params_events_value_invalid_event_name(send_blocking_command, value):
    """Event names must refer to a known module and event."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command("session.subscribe", {"events": [value]})


@pytest.mark.asyncio
async def test_params_events_value_valid_and_invalid_event_names(
    bidi_session, send_blocking_command, top_context
):
    """A request mixing valid and invalid event names must fail atomically:
    the valid event must not end up subscribed."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.subscribe", {"events": ["log.entryAdded", "some.invalidEvent"]}
        )

    # Make sure that we didn't subscribe to log.entryAdded because of the error

    # Track all received log.entryAdded events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    await create_console_api_message(bidi_session, top_context, "text1")

    # Wait for some time before checking the events array
    await asyncio.sleep(0.5)
    assert len(events) == 0

    remove_listener()


@pytest.mark.asyncio
@pytest.mark.parametrize("value", [True, "foo", 42, {}])
async def test_params_contexts_invalid_type(bidi_session, send_blocking_command, value):
    """"contexts" must be a list; any other JSON type is invalid."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.subscribe",
            {
                "events": [],
                "contexts": value,
            }
        )


@pytest.mark.asyncio
async def test_params_contexts_empty(bidi_session):
    """Subscribing with an empty contexts list succeeds as a no-op."""
    response = await bidi_session.session.subscribe(events=[], contexts=[])
    assert response == {}


@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, 42, [], {}])
async def test_params_contexts_value_invalid_type(send_blocking_command, value):
    """Each entry of "contexts" must be a string."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.subscribe",
            {
                "events": [],
                "contexts": [value],
            }
        )


@pytest.mark.asyncio
async def test_params_contexts_value_invalid_value(send_blocking_command):
    """An unknown context id is reported as "no such frame"."""
    with pytest.raises(NoSuchFrameException):
        await send_blocking_command(
            "session.subscribe",
            {
                "events": [],
                "contexts": ["foo"],
            }
        )


@pytest.mark.asyncio
async def test_params_contexts_valid_and_invalid_value(
    bidi_session, send_blocking_command, top_context
):
    """A request mixing valid and invalid context ids must fail atomically:
    the valid context must not end up subscribed."""
    with pytest.raises(NoSuchFrameException):
        await send_blocking_command(
            "session.subscribe",
            {"events": ["log.entryAdded"], "contexts": [top_context["context"], "foo"]},
        )

    # Make sure that we didn't subscribe to log.entryAdded because of error

    # Track all received log.entryAdded events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    await create_console_api_message(bidi_session, top_context, "text1")

    # Wait for some time before checking the events array
    await asyncio.sleep(0.5)
    assert len(events) == 0

    remove_listener()


@pytest.mark.asyncio
async def test_subscribe_to_closed_tab(bidi_session, send_blocking_command):
    """Subscribing to a context that has already been closed must fail."""
    new_tab = await bidi_session.browsing_context.create(type_hint="tab")
    await bidi_session.browsing_context.close(context=new_tab["context"])

    # Try to subscribe to the closed context
    with pytest.raises(NoSuchFrameException):
        await send_blocking_command(
            "session.subscribe",
            {"events": ["log.entryAdded"], "contexts": [new_tab["context"]]},
        )
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/__init__.py b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/contexts.py b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/contexts.py
new file mode 100644
index 0000000000..99584987ef
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/contexts.py
@@ -0,0 +1,167 @@
import pytest

from ... import create_console_api_message, recursive_compare

# The basic use case of unsubscribing from all contexts for a single event
# is covered by tests for each event in the dedicated folders.


@pytest.mark.asyncio
async def test_unsubscribe_from_one_context(
    bidi_session, top_context, new_tab, wait_for_event
):
    """Unsubscribing one context must leave subscriptions on the other
    contexts intact."""
    # Subscribe for log events to multiple contexts
    await bidi_session.session.subscribe(
        events=["log.entryAdded"], contexts=[top_context["context"], new_tab["context"]]
    )
    # Unsubscribe from log events in one of the contexts
    await bidi_session.session.unsubscribe(
        events=["log.entryAdded"], contexts=[top_context["context"]]
    )

    # Track all received log.entryAdded events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    # Trigger console event in the unsubscribed context
    await create_console_api_message(bidi_session, top_context, "text1")
    assert len(events) == 0

    # Trigger another console event in the still observed context
    on_entry_added = wait_for_event("log.entryAdded")
    expected_text = await create_console_api_message(bidi_session, new_tab, "text2")
    await on_entry_added

    assert len(events) == 1
    recursive_compare(
        {
            "text": expected_text,
        },
        events[0],
    )

    remove_listener()
    await bidi_session.session.unsubscribe(
        events=["log.entryAdded"], contexts=[new_tab["context"]]
    )


@pytest.mark.asyncio
async def test_unsubscribe_from_top_context_with_iframes(
    bidi_session,
    top_context,
    test_page_same_origin_frame,
):
    """Unsubscribing a top-level context also stops delivery of events
    originating in its child frames."""
    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=test_page_same_origin_frame, wait="complete"
    )

    contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"])

    assert len(contexts[0]["children"]) == 1
    frame = contexts[0]["children"][0]

    # Subscribe and unsubscribe to the top context
    await bidi_session.session.subscribe(
        events=["log.entryAdded"], contexts=[top_context["context"]]
    )
    await bidi_session.session.unsubscribe(
        events=["log.entryAdded"], contexts=[top_context["context"]]
    )

    # Track all received log.entryAdded events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    # Trigger the event in the frame
    await create_console_api_message(bidi_session, frame, "text1")

    assert len(events) == 0

    remove_listener()


@pytest.mark.asyncio
async def test_unsubscribe_from_child_context(
    bidi_session,
    top_context,
    test_page_same_origin_frame,
):
    """Unsubscribing via a child frame id removes the subscription that was
    made on its top-level context."""
    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=test_page_same_origin_frame, wait="complete"
    )

    contexts = await bidi_session.browsing_context.get_tree(root=top_context["context"])

    assert len(contexts[0]["children"]) == 1
    frame = contexts[0]["children"][0]

    # Subscribe to top context
    await bidi_session.session.subscribe(
        events=["log.entryAdded"], contexts=[top_context["context"]]
    )
    # Unsubscribe from the frame context
    await bidi_session.session.unsubscribe(
        events=["log.entryAdded"], contexts=[frame["context"]]
    )

    # Track all received log.entryAdded events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    # Trigger the event in the frame
    await create_console_api_message(bidi_session, frame, "text1")
    # Trigger the event in the top context
    await create_console_api_message(bidi_session, top_context, "text2")

    # Make sure we didn't receive any of the triggered events
    assert len(events) == 0

    remove_listener()


@pytest.mark.asyncio
async def test_unsubscribe_from_one_context_after_navigation(
    bidi_session, top_context, test_alt_origin
):
    """A cross-origin navigation must not detach an existing subscription:
    unsubscribing afterwards still works and stops events."""
    await bidi_session.session.subscribe(
        events=["log.entryAdded"], contexts=[top_context["context"]]
    )

    await bidi_session.browsing_context.navigate(
        context=top_context["context"], url=test_alt_origin, wait="complete"
    )

    await bidi_session.session.unsubscribe(
        events=["log.entryAdded"], contexts=[top_context["context"]]
    )

    # Track all received log.entryAdded events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    # Trigger the event
    await create_console_api_message(bidi_session, top_context, "text1")

    # Make sure we successfully unsubscribed
    assert len(events) == 0

    remove_listener()
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/events.py b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/events.py
new file mode 100644
index 0000000000..6cf2a896d2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/events.py
@@ -0,0 +1,83 @@
import pytest
from tests.support.sync import AsyncPoll
from webdriver.error import TimeoutException

# The basic use case of unsubscribing globally from a single event
# is covered by tests for each event in the dedicated folders.


@pytest.mark.asyncio
async def test_unsubscribe_from_module(bidi_session):
    """Unsubscribing by module name removes every event of that module."""
    await bidi_session.session.subscribe(events=["browsingContext"])
    await bidi_session.session.unsubscribe(events=["browsingContext"])

    # Track all received browsing context events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener_contextCreated = bidi_session.add_event_listener(
        "browsingContext.contextCreated", on_event
    )
    remove_listener_domContentLoaded = bidi_session.add_event_listener(
        "browsingContext.domContentLoaded", on_event
    )
    remove_listener_load = bidi_session.add_event_listener(
        "browsingContext.load", on_event
    )

    await bidi_session.browsing_context.create(type_hint="tab")

    # No event should arrive: poll briefly and require a timeout.
    wait = AsyncPoll(bidi_session, timeout=0.5)
    with pytest.raises(TimeoutException):
        await wait.until(lambda _: len(events) > 0)

    remove_listener_contextCreated()
    remove_listener_domContentLoaded()
    remove_listener_load()


@pytest.mark.asyncio
async def test_subscribe_to_module_unsubscribe_from_one_event(
    bidi_session, wait_for_event
):
    """After a module-wide subscription, unsubscribing one event stops only
    that event; the module's other events keep firing."""
    await bidi_session.session.subscribe(events=["browsingContext"])

    # Unsubscribe from one event
    await bidi_session.session.unsubscribe(events=["browsingContext.domContentLoaded"])

    # Track all received browsing context events in the events array
    events = []

    async def on_event(method, data):
        events.append(method)

    remove_listener_contextCreated = bidi_session.add_event_listener(
        "browsingContext.contextCreated", on_event
    )
    remove_listener_domContentLoaded = bidi_session.add_event_listener(
        "browsingContext.domContentLoaded", on_event
    )
    remove_listener_load = bidi_session.add_event_listener(
        "browsingContext.load", on_event
    )

    # Wait for the last event
    on_entry_added = wait_for_event("browsingContext.load")
    await bidi_session.browsing_context.create(type_hint="tab")
    await on_entry_added

    # Make sure we didn't receive browsingContext.domContentLoaded event
    assert len(events) == 2
    assert "browsingContext.domContentLoaded" not in events

    remove_listener_contextCreated()
    remove_listener_domContentLoaded()
    remove_listener_load()

    # Unsubscribe from the rest of the events
    await bidi_session.session.unsubscribe(events=["browsingContext.contextCreated"])
    await bidi_session.session.unsubscribe(events=["browsingContext.load"])
diff --git a/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/invalid.py b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/invalid.py
new file mode 100644
index 0000000000..a4fa34e8a1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/bidi/session/unsubscribe/invalid.py
@@ -0,0 +1,234 @@
import pytest
from webdriver.bidi.error import InvalidArgumentException, NoSuchFrameException

from ... import create_console_api_message


@pytest.mark.asyncio
async def test_params_empty(bidi_session, send_blocking_command):
    """The "events" field is mandatory, so empty params must be rejected."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command("session.unsubscribe", {})


@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, "foo", 42, {}])
async def test_params_events_invalid_type(bidi_session, send_blocking_command, value):
    """"events" must be a list; any other JSON type is invalid."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command("session.unsubscribe", {"events": value})


@pytest.mark.asyncio
async def test_params_events_empty(bidi_session):
    """Unsubscribing from an empty list of events succeeds as a no-op."""
    response = await bidi_session.session.unsubscribe(events=[])
    assert response == {}


@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, 42, [], {}])
async def test_params_events_value_invalid_type(send_blocking_command, value):
    """Each entry of "events" must be a string."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command("session.unsubscribe", {"events": [value]})


@pytest.mark.asyncio
@pytest.mark.parametrize("value", ["", "foo", "foo.bar"])
async def test_params_events_value_invalid_event_name(send_blocking_command, value):
    """Event names must refer to a known module and event."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command("session.unsubscribe", {"events": [value]})


@pytest.mark.asyncio
async def test_params_events_value_valid_and_invalid_event_name(
    bidi_session, subscribe_events, send_blocking_command, wait_for_event, top_context
):
    """A request mixing valid and invalid event names must fail atomically:
    the existing valid subscription must remain active."""
    # Subscribe to a valid event
    await subscribe_events(events=["log.entryAdded"])

    # Try to unsubscribe from the valid and an invalid event
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.unsubscribe", {"events": ["log.entryAdded", "some.invalidEvent"]}
        )

    # Make sure that we didn't unsubscribe from log.entryAdded because of the error
    # and events are still coming

    # Track all received log.entryAdded events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    on_entry_added = wait_for_event("log.entryAdded")
    await create_console_api_message(bidi_session, top_context, "text1")
    await on_entry_added

    assert len(events) == 1

    remove_listener()


@pytest.mark.asyncio
async def test_unsubscribe_from_one_event_and_then_from_module(
    bidi_session, subscribe_events, send_blocking_command
):
    """Once a single event has been removed, unsubscribing by module name
    must fail because the module is no longer fully subscribed."""
    await subscribe_events(events=["browsingContext"])

    # Unsubscribe from one event
    await bidi_session.session.unsubscribe(events=["browsingContext.domContentLoaded"])

    # Try to unsubscribe from all events
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.unsubscribe", {"events": ["browsingContext"]}
        )

    # Unsubscribe from the rest of the events
    await bidi_session.session.unsubscribe(events=["browsingContext.contextCreated"])
    await bidi_session.session.unsubscribe(events=["browsingContext.load"])


@pytest.mark.asyncio
@pytest.mark.parametrize("value", [True, "foo", 42, {}])
async def test_params_contexts_invalid_type(bidi_session, send_blocking_command, value):
    """"contexts" must be a list; any other JSON type is invalid."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.unsubscribe",
            {
                "events": [],
                "contexts": value,
            }
        )


@pytest.mark.asyncio
async def test_params_contexts_empty(bidi_session):
    """Unsubscribing with an empty contexts list succeeds as a no-op."""
    response = await bidi_session.session.unsubscribe(events=[], contexts=[])
    assert response == {}


@pytest.mark.asyncio
@pytest.mark.parametrize("value", [None, True, 42, [], {}])
async def test_params_contexts_value_invalid_type(send_blocking_command, value):
    """Each entry of "contexts" must be a string."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.unsubscribe",
            {
                "events": [],
                "contexts": [value],
            }
        )


@pytest.mark.asyncio
async def test_params_contexts_value_invalid_value(send_blocking_command):
    """An unknown context id is reported as "no such frame"."""
    with pytest.raises(NoSuchFrameException):
        await send_blocking_command(
            "session.unsubscribe",
            {
                "events": [],
                "contexts": ["foo"],
            },
        )


@pytest.mark.asyncio
async def test_params_contexts_value_valid_and_invalid_value(
    bidi_session, subscribe_events, send_blocking_command, wait_for_event, top_context
):
    """A request mixing valid and invalid context ids must fail atomically:
    the existing valid subscription must remain active."""
    # Subscribe to a valid context
    await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]])

    # Try to unsubscribe from the valid and an invalid context
    with pytest.raises(NoSuchFrameException):
        await send_blocking_command(
            "session.unsubscribe",
            {"events": ["log.entryAdded"], "contexts": [top_context["context"], "foo"]},
        )

    # Make sure that we didn't unsubscribe from the valid context because of the error
    # and events are still coming

    # Track all received log.entryAdded events in the events array
    events = []

    async def on_event(method, data):
        events.append(data)

    remove_listener = bidi_session.add_event_listener("log.entryAdded", on_event)

    on_entry_added = wait_for_event("log.entryAdded")
    await create_console_api_message(bidi_session, top_context, "text1")
    await on_entry_added

    assert len(events) == 1

    remove_listener()


@pytest.mark.asyncio
async def test_unsubscribe_from_closed_tab(
    bidi_session, subscribe_events, send_blocking_command
):
    """Unsubscribing from a context that has been closed must fail."""
    new_tab = await bidi_session.browsing_context.create(type_hint="tab")
    # Subscribe to a new context
    await subscribe_events(events=["log.entryAdded"], contexts=[new_tab["context"]])

    await bidi_session.browsing_context.close(context=new_tab["context"])

    # Try to unsubscribe from the closed context
    with pytest.raises(NoSuchFrameException):
        await send_blocking_command(
            "session.unsubscribe",
            {"events": ["log.entryAdded"], "contexts": [new_tab["context"]]},
        )


@pytest.mark.asyncio
async def test_params_unsubscribe_globally_without_subscription(send_blocking_command):
    """A global unsubscribe without any prior subscription is invalid."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.unsubscribe", {"events": ["log.entryAdded"]}
        )


@pytest.mark.asyncio
async def test_params_unsubscribe_globally_with_individual_subscription(
    subscribe_events, send_blocking_command, top_context
):
    """A global unsubscribe cannot remove a per-context subscription."""
    # Subscribe to one context
    await subscribe_events(events=["log.entryAdded"], contexts=[top_context["context"]])

    # Try to unsubscribe globally
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.unsubscribe", {"events": ["log.entryAdded"]}
        )


@pytest.mark.asyncio
async def test_params_unsubscribe_from_one_context_without_subscription(
    send_blocking_command, top_context
):
    """A per-context unsubscribe without any prior subscription is invalid."""
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.unsubscribe",
            {"events": ["log.entryAdded"], "contexts": [top_context["context"]]},
        )


@pytest.mark.asyncio
async def test_params_unsubscribe_from_one_context_with_global_subscription(
    subscribe_events, send_blocking_command, top_context
):
    """A per-context unsubscribe cannot remove a global subscription."""
    # Subscribe to all contexts
    await subscribe_events(events=["log.entryAdded"])

    # Try to unsubscribe from one context
    with pytest.raises(InvalidArgumentException):
        await send_blocking_command(
            "session.unsubscribe",
            {"events": ["log.entryAdded"], "contexts": [top_context["context"]]},
        )
diff --git a/testing/web-platform/tests/webdriver/tests/close_window/__init__.py b/testing/web-platform/tests/webdriver/tests/close_window/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/close_window/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/close_window/close.py b/testing/web-platform/tests/webdriver/tests/close_window/close.py
new file mode 100644
index 0000000000..7b382fa9bb
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/close_window/close.py
@@ -0,0 +1,102 @@
import pytest
from webdriver import error

from tests.support.asserts import assert_error, assert_success


def close(session):
    # Send the classic "Close Window" command for the current session.
    return session.transport.send(
        "DELETE", "session/{session_id}/window".format(**vars(session)))


def test_no_top_browsing_context(session, closed_window):
    # With the current top-level browsing context already closed, the
    # command must fail with "no such window".
    response = close(session)
    assert_error(response, "no such window")


def test_no_browsing_context(session, url):
    # Closing must still succeed when the session's *current* browsing
    # context (a nested frame) has been removed from the document; the
    # response lists the remaining window handles.
    new_handle = session.new_window()

    session.url = url("/webdriver/tests/support/html/frames.html")

    subframe = session.find.css("#sub-frame", all=False)
    session.switch_frame(subframe)

    frame = session.find.css("#delete-frame", all=False)
    session.switch_frame(frame)

    # Presumably this button removes the parent frame from the DOM,
    # leaving the current browsing context detached — see the fixture page.
    button = session.find.css("#remove-parent", all=False)
    button.click()

    response = close(session)
    handles = assert_success(response)
    assert handles == [new_handle]


def test_close_browsing_context(session):
    # Closing a freshly opened window returns the set of handles that
    # existed before it was opened.
    original_handles = session.handles

    new_handle = session.new_window()
    session.window_handle = new_handle

    response = close(session)
    handles = assert_success(response, original_handles)
    assert session.handles == original_handles
    assert new_handle not in handles


def test_close_browsing_context_with_dismissed_beforeunload_prompt(session, inline):
    # A page that installs a beforeunload handler (armed by typing into the
    # input) must not block Close Window: the prompt is auto-dismissed.
    original_handles = session.handles

    new_handle = session.new_window()
    session.window_handle = new_handle

    session.url = inline("""
      <input type="text">
      <script>
        window.addEventListener("beforeunload", function (event) {
          event.preventDefault();
        });
      </script>
    """)

    session.find.css("input", all=False).send_keys("foo")

    response = close(session)
    handles = assert_success(response, original_handles)
    assert session.handles == original_handles
    assert new_handle not in handles

    # A beforeunload prompt has to be automatically dismissed
    with pytest.raises(error.NoSuchWindowException):
        session.alert.text


def test_close_last_browsing_context(session):
    # Closing the only remaining window returns an empty handle list.
    assert len(session.handles) == 1
    response = close(session)

    assert_success(response, [])

    # With no more open top-level browsing contexts, the session is closed.
    session.session_id = None


def test_element_usage_after_closing_browsing_context(session, inline):
    # An element reference from a *different* window must stay usable after
    # another window is closed.
    session.url = inline("<p id='a'>foo")
    # NOTE(review): `a` is never used below — looks like intentional setup
    # noise for the first window, but confirm it isn't a leftover.
    a = session.find.css("p", all=False)
    first = session.window_handle

    second = session.new_window(type_hint="tab")
    session.window_handle = second

    session.url = inline("<p id='b'>other")
    b = session.find.css("p", all=False)

    session.window_handle = first
    response = close(session)
    assert_success(response)
    assert len(session.handles) == 1

    session.window_handle = second
    assert b.attribute("id") == "b"
diff --git a/testing/web-platform/tests/webdriver/tests/close_window/user_prompts.py b/testing/web-platform/tests/webdriver/tests/close_window/user_prompts.py
new file mode 100644
index 0000000000..c0f9cc7610
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/close_window/user_prompts.py
@@ -0,0 +1,119 @@
# META: timeout=long

import pytest

from tests.support.asserts import assert_dialog_handled, assert_error, assert_success


def close(session):
    # Send the classic "Close Window" command for the current session.
    return session.transport.send(
        "DELETE", "session/{session_id}/window".format(**vars(session)))


@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
    """Factory asserting that Close Window succeeds, the open user prompt is
    handled implicitly with the expected return value, and the window is gone."""
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        original_handle = session.window_handle
        new_handle = session.new_window()
        session.window_handle = new_handle

        create_dialog(dialog_type, text=dialog_type)

        response = close(session)
        assert_success(response)

        # Asserting that the dialog was handled requires valid top-level browsing
        # context, so we must switch to the original window.
        session.window_handle = original_handle

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        assert new_handle not in session.handles

    return check_user_prompt_closed_without_exception


@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
    """Factory asserting that Close Window fails with "unexpected alert open",
    the prompt was still handled, and the window remains open."""
    def check_user_prompt_closed_with_exception(dialog_type, retval):
        new_handle = session.new_window()
        session.window_handle = new_handle

        create_dialog(dialog_type, text=dialog_type)

        response = close(session)
        assert_error(response, "unexpected alert open")

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        assert new_handle in session.handles

    return check_user_prompt_closed_with_exception


@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
    """Factory asserting that Close Window fails with "unexpected alert open"
    while the prompt stays open (the "ignore" behavior) and the window remains."""
    def check_user_prompt_not_closed_but_exception(dialog_type):
        new_handle = session.new_window()
        session.window_handle = new_handle

        create_dialog(dialog_type, text=dialog_type)

        response = close(session)
        assert_error(response, "unexpected alert open")

        # The prompt must still be present; dismiss it to clean up.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

        assert new_handle in session.handles

    return check_user_prompt_not_closed_but_exception


@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_accept(check_user_prompt_closed_without_exception, dialog_type):
    # retval not testable for confirm and prompt because window is gone
    check_user_prompt_closed_without_exception(dialog_type, None)


@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    check_user_prompt_closed_with_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
    # retval not testable for confirm and prompt because window is gone
    check_user_prompt_closed_without_exception(dialog_type, None)


@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    check_user_prompt_closed_with_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    check_user_prompt_not_closed_but_exception(dialog_type)


# Without an explicit capability, the default is "dismiss and notify".
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/conftest.py b/testing/web-platform/tests/webdriver/tests/conftest.py
new file mode 100644
index 0000000000..fe9f5cd268
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/conftest.py
@@ -0,0 +1,5 @@
# Shared pytest plugins providing the fixtures (common support, BiDi, and
# classic HTTP) used across all webdriver test suites in this tree.
pytest_plugins = (
    "tests.support.fixtures",
    "tests.support.fixtures_bidi",
    "tests.support.fixtures_http",
)
diff --git a/testing/web-platform/tests/webdriver/tests/delete_all_cookies/__init__.py b/testing/web-platform/tests/webdriver/tests/delete_all_cookies/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/delete_all_cookies/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/delete_all_cookies/delete.py b/testing/web-platform/tests/webdriver/tests/delete_all_cookies/delete.py
new file mode 100644
index 0000000000..86d66561b0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/delete_all_cookies/delete.py
@@ -0,0 +1,22 @@
from tests.support.asserts import assert_error, assert_success


def delete_all_cookies(session):
    """Send the classic "Delete All Cookies" command for the given session."""
    endpoint = "/session/{session_id}/cookie".format(**vars(session))
    return session.transport.send("DELETE", endpoint)


def test_null_response_value(session, url):
    # A successful Delete All Cookies response carries a null value.
    result = delete_all_cookies(session)
    assert assert_success(result) is None


def test_no_top_browsing_context(session, closed_window):
    # A closed top-level browsing context must yield "no such window".
    assert_error(delete_all_cookies(session), "no such window")


def test_no_browsing_context(session, closed_frame):
    # A closed frame as current browsing context must yield "no such window".
    assert_error(delete_all_cookies(session), "no such window")
diff --git a/testing/web-platform/tests/webdriver/tests/delete_all_cookies/user_prompts.py b/testing/web-platform/tests/webdriver/tests/delete_all_cookies/user_prompts.py
new file mode 100644
index 0000000000..dca4f3c8bf
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/delete_all_cookies/user_prompts.py
@@ -0,0 +1,119 @@
+# META: timeout=long
+
+import pytest
+
+from webdriver.error import NoSuchCookieException
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def delete_all_cookies(session):
+ return session.transport.send(
+ "DELETE", "/session/{session_id}/cookie".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, create_cookie):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ create_cookie("foo", value="bar", path="/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = delete_all_cookies(session)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.cookies() == []
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, create_cookie):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ create_cookie("foo", value="bar", path="/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = delete_all_cookies(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.cookies() != []
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, create_cookie):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ create_cookie("foo", value="bar", path="/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = delete_all_cookies(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.cookies() != []
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/delete_cookie/__init__.py b/testing/web-platform/tests/webdriver/tests/delete_cookie/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/delete_cookie/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/delete_cookie/delete.py b/testing/web-platform/tests/webdriver/tests/delete_cookie/delete.py
new file mode 100644
index 0000000000..4b37c0453b
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/delete_cookie/delete.py
@@ -0,0 +1,29 @@
+from tests.support.asserts import assert_error, assert_success
+
+
+def delete_cookie(session, name):
+ return session.transport.send(
+ "DELETE", "/session/{session_id}/cookie/{name}".format(
+ session_id=session.session_id,
+ name=name))
+
+
+def test_null_response_value(session, url):
+ response = delete_cookie(session, "foo")
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = delete_cookie(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = delete_cookie(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_unknown_cookie(session):
+ response = delete_cookie(session, "stilton")
+ assert_success(response)
diff --git a/testing/web-platform/tests/webdriver/tests/delete_cookie/user_prompts.py b/testing/web-platform/tests/webdriver/tests/delete_cookie/user_prompts.py
new file mode 100644
index 0000000000..1ed7db6e8e
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/delete_cookie/user_prompts.py
@@ -0,0 +1,119 @@
+# META: timeout=long
+
+import pytest
+
+from webdriver.error import NoSuchCookieException
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def delete_cookie(session, name):
+ return session.transport.send("DELETE", "/session/%s/cookie/%s" % (session.session_id, name))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, create_cookie):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ create_cookie("foo", value="bar", path="/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = delete_cookie(session, "foo")
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ with pytest.raises(NoSuchCookieException):
+ assert session.cookies("foo")
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, create_cookie):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ create_cookie("foo", value="bar", path="/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = delete_cookie(session, "foo")
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.cookies("foo")
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, create_cookie):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ create_cookie("foo", value="bar", path="/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = delete_cookie(session, "foo")
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.cookies("foo")
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/delete_session/__init__.py b/testing/web-platform/tests/webdriver/tests/delete_session/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/delete_session/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/delete_session/delete.py b/testing/web-platform/tests/webdriver/tests/delete_session/delete.py
new file mode 100644
index 0000000000..a3032cc134
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/delete_session/delete.py
@@ -0,0 +1,42 @@
+import pytest
+from webdriver import error
+
+from tests.support.asserts import assert_success
+
+
+def delete_session(session):
+ return session.transport.send("DELETE", "session/{session_id}".format(**vars(session)))
+
+
+def test_null_response_value(session):
+ response = delete_session(session)
+ value = assert_success(response)
+ assert value is None
+
+ # Need an explicit call to session.end() to notify the test harness
+ # that a new session needs to be created for subsequent tests.
+ session.end()
+
+
+def test_dismissed_beforeunload_prompt(session, inline):
+ session.url = inline("""
+ <input type="text">
+ <script>
+ window.addEventListener("beforeunload", function (event) {
+ event.preventDefault();
+ });
+ </script>
+ """)
+
+ session.find.css("input", all=False).send_keys("foo")
+
+ response = delete_session(session)
+ assert_success(response)
+
+ # A beforeunload prompt has to be automatically dismissed, and the session deleted
+ with pytest.raises(error.InvalidSessionIdException):
+ session.alert.text
+
+ # Need an explicit call to session.end() to notify the test harness
+ # that a new session needs to be created for subsequent tests.
+ session.end()
diff --git a/testing/web-platform/tests/webdriver/tests/dismiss_alert/__init__.py b/testing/web-platform/tests/webdriver/tests/dismiss_alert/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/dismiss_alert/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/dismiss_alert/dismiss.py b/testing/web-platform/tests/webdriver/tests/dismiss_alert/dismiss.py
new file mode 100644
index 0000000000..a28dec7687
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/dismiss_alert/dismiss.py
@@ -0,0 +1,109 @@
+import pytest
+
+from webdriver.error import NoSuchAlertException
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import wait_for_new_handle
+from tests.support.sync import Poll
+
+
+def dismiss_alert(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/alert/dismiss".format(**vars(session)))
+
+
+def test_null_response_value(session, inline):
+ session.url = inline("<script>window.alert('Hello');</script>")
+
+ response = dismiss_alert(session)
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = dismiss_alert(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = dismiss_alert(session)
+ assert_error(response, "no such alert")
+
+
+def test_no_user_prompt(session):
+ response = dismiss_alert(session)
+ assert_error(response, "no such alert")
+
+
+def test_dismiss_alert(session, inline):
+ session.url = inline("<script>window.alert('Hello');</script>")
+
+ response = dismiss_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
+
+
+def test_dismiss_confirm(session, inline):
+ session.url = inline("<script>window.result = window.confirm('Hello');</script>")
+
+ response = dismiss_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
+
+ assert session.execute_script("return window.result;") is False
+
+
+def test_dismiss_prompt(session, inline):
+ session.url = inline("""
+ <script>window.result = window.prompt('Enter Your Name: ', 'Federer');</script>
+ """)
+
+ response = dismiss_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
+
+ assert session.execute_script("return window.result") is None
+
+
+def test_unexpected_alert(session):
+ session.execute_script("setTimeout(function() { alert('Hello'); }, 100);")
+
+ wait = Poll(
+ session,
+ timeout=5,
+ ignored_exceptions=NoSuchAlertException,
+ message="No user prompt with text 'Hello' detected")
+ wait.until(lambda s: s.alert.text == "Hello")
+
+ response = dismiss_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
+
+
+def test_dismiss_in_popup_window(session, inline):
+ orig_handles = session.handles
+
+ session.url = inline("""
+ <button onclick="window.open('about:blank', '_blank', 'width=500; height=200;resizable=yes');">open</button>
+ """)
+ button = session.find.css("button", all=False)
+ button.click()
+
+ session.window_handle = wait_for_new_handle(session, orig_handles)
+ session.url = inline("""
+ <script>window.alert("Hello")</script>
+ """)
+
+ response = dismiss_alert(session)
+ assert_success(response)
+
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
diff --git a/testing/web-platform/tests/webdriver/tests/element_clear/__init__.py b/testing/web-platform/tests/webdriver/tests/element_clear/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_clear/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/element_clear/clear.py b/testing/web-platform/tests/webdriver/tests/element_clear/clear.py
new file mode 100644
index 0000000000..6f4f31ffa4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_clear/clear.py
@@ -0,0 +1,444 @@
+# META: timeout=long
+
+import pytest
+from webdriver import Element
+
+from tests.support.asserts import (
+ assert_element_has_focus,
+ assert_error,
+ assert_events_equal,
+ assert_in_events,
+ assert_success,
+)
+
+
+@pytest.fixture
+def tracked_events():
+ return [
+ "blur",
+ "change",
+ "focus",
+ ]
+
+
+def element_clear(session, element):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/clear".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+@pytest.fixture(scope="session")
+def text_file(tmpdir_factory):
+ fh = tmpdir_factory.mktemp("tmp").join("hello.txt")
+ fh.write("hello")
+ return fh
+
+
+def test_null_response_value(session, inline):
+ session.url = inline("<input>")
+ element = session.find.css("input", all=False)
+
+ response = element_clear(session, element)
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ element = Element("foo", session)
+ response = element_clear(session, element)
+ assert_error(response, "no such window")
+
+ original_handle, element = closed_window
+ response = element_clear(session, element)
+ assert_error(response, "no such window")
+
+ session.window_handle = original_handle
+ response = element_clear(session, element)
+ assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+ element = Element("foo", session)
+
+ response = element_clear(session, element)
+ assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+ element = Element("foo", session)
+
+ response = element_clear(session, element)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ response = element_clear(session, element)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ response = element_clear(session, button)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<input>", "input", as_frame=as_frame)
+
+ response = element_clear(session, element)
+ assert_error(response, "stale element reference")
+
+
+def test_pointer_interactable(session, inline):
+ session.url = inline("<input style='margin-left: -1000px' value=foobar>")
+ element = session.find.css("input", all=False)
+
+ response = element_clear(session, element)
+ assert_error(response, "element not interactable")
+
+
+def test_keyboard_interactable(session, inline):
+ session.url = inline("""
+ <input value=foobar>
+ <div></div>
+
+ <style>
+ div {
+ position: absolute;
+ background: blue;
+ top: 0;
+ }
+ </style>
+ """)
+ element = session.find.css("input", all=False)
+ assert element.property("value") == "foobar"
+
+ response = element_clear(session, element)
+ assert_success(response)
+ assert element.property("value") == ""
+
+
+@pytest.mark.parametrize("type,value,default",
+ [("number", "42", ""),
+ ("range", "42", "50"),
+ ("email", "foo@example.com", ""),
+ ("password", "password", ""),
+ ("search", "search", ""),
+ ("tel", "999", ""),
+ ("text", "text", ""),
+ ("url", "https://example.com/", ""),
+ ("color", "#ff0000", "#000000"),
+ ("date", "2017-12-26", ""),
+ ("datetime", "2017-12-26T19:48", ""),
+ ("datetime-local", "2017-12-26T19:48", ""),
+ ("time", "19:48", ""),
+ ("month", "2017-11", ""),
+ ("week", "2017-W52", "")])
+def test_input(session, inline, add_event_listeners, tracked_events, type, value, default):
+ session.url = inline("<input type=%s value='%s'>" % (type, value))
+ element = session.find.css("input", all=False)
+ add_event_listeners(element, tracked_events)
+ assert element.property("value") == value
+
+ response = element_clear(session, element)
+ assert_success(response)
+ assert element.property("value") == default
+ assert_in_events(session, ["focus", "change", "blur"])
+ assert_element_has_focus(session.execute_script("return document.body"))
+
+
+@pytest.mark.parametrize("type",
+ ["number",
+ "range",
+ "email",
+ "password",
+ "search",
+ "tel",
+ "text",
+ "url",
+ "color",
+ "date",
+ "datetime",
+ "datetime-local",
+ "time",
+ "month",
+ "week",
+ "file"])
+def test_input_disabled(session, inline, type):
+ session.url = inline("<input type=%s disabled>" % type)
+ element = session.find.css("input", all=False)
+
+ response = element_clear(session, element)
+ assert_error(response, "invalid element state")
+
+
+@pytest.mark.parametrize("type",
+ ["number",
+ "range",
+ "email",
+ "password",
+ "search",
+ "tel",
+ "text",
+ "url",
+ "color",
+ "date",
+ "datetime",
+ "datetime-local",
+ "time",
+ "month",
+ "week",
+ "file"])
+def test_input_readonly(session, inline, type):
+ session.url = inline("<input type=%s readonly>" % type)
+ element = session.find.css("input", all=False)
+
+ response = element_clear(session, element)
+ assert_error(response, "invalid element state")
+
+
+def test_textarea(session, inline, add_event_listeners, tracked_events):
+ session.url = inline("<textarea>foobar</textarea>")
+ element = session.find.css("textarea", all=False)
+ add_event_listeners(element, tracked_events)
+ assert element.property("value") == "foobar"
+
+ response = element_clear(session, element)
+ assert_success(response)
+ assert element.property("value") == ""
+ assert_in_events(session, ["focus", "change", "blur"])
+
+
+def test_textarea_disabled(session, inline):
+ session.url = inline("<textarea disabled></textarea>")
+ element = session.find.css("textarea", all=False)
+
+ response = element_clear(session, element)
+ assert_error(response, "invalid element state")
+
+
+def test_textarea_readonly(session, inline):
+ session.url = inline("<textarea readonly></textarea>")
+ element = session.find.css("textarea", all=False)
+
+ response = element_clear(session, element)
+ assert_error(response, "invalid element state")
+
+
+def test_input_file(session, text_file, inline):
+ session.url = inline("<input type=file>")
+ element = session.find.css("input", all=False)
+ element.send_keys(str(text_file))
+
+ response = element_clear(session, element)
+ assert_success(response)
+ assert element.property("value") == ""
+
+
+def test_input_file_multiple(session, text_file, inline):
+ session.url = inline("<input type=file multiple>")
+ element = session.find.css("input", all=False)
+ element.send_keys(str(text_file))
+ element.send_keys(str(text_file))
+
+ response = element_clear(session, element)
+ assert_success(response)
+ assert element.property("value") == ""
+
+
+def test_select(session, inline):
+ session.url = inline("""
+ <select>
+ <option>foo
+ </select>
+ """)
+ select = session.find.css("select", all=False)
+ option = session.find.css("option", all=False)
+
+ response = element_clear(session, select)
+ assert_error(response, "invalid element state")
+ response = element_clear(session, option)
+ assert_error(response, "invalid element state")
+
+
+def test_button(session, inline):
+ session.url = inline("<button></button>")
+ button = session.find.css("button", all=False)
+
+ response = element_clear(session, button)
+ assert_error(response, "invalid element state")
+
+
+def test_button_with_subtree(session, inline):
+ """
+ Elements inside button elements are interactable.
+ """
+ session.url = inline("""
+ <button>
+ <input value=foobar>
+ </button>
+ """)
+ text_field = session.find.css("input", all=False)
+
+ response = element_clear(session, text_field)
+ assert_success(response)
+
+
+def test_contenteditable(session, inline, add_event_listeners, tracked_events):
+ session.url = inline("<p contenteditable>foobar</p>")
+ element = session.find.css("p", all=False)
+ add_event_listeners(element, tracked_events)
+ assert element.property("innerHTML") == "foobar"
+
+ response = element_clear(session, element)
+ assert_success(response)
+ assert element.property("innerHTML") == ""
+ assert_events_equal(session, ["focus", "blur"])
+ assert_element_has_focus(session.execute_script("return document.body"))
+
+
+def test_designmode(session, inline):
+ session.url = inline("foobar")
+ element = session.find.css("body", all=False)
+ assert element.property("innerHTML") == "foobar"
+ session.execute_script("document.designMode = 'on'")
+
+ response = element_clear(session, element)
+ assert_success(response)
+ assert element.property("innerHTML") in ["", "<br>"]
+ assert_element_has_focus(session.execute_script("return document.body"))
+
+
+def test_resettable_element_focus_when_empty(session, inline, add_event_listeners, tracked_events):
+ session.url = inline("<input>")
+ element = session.find.css("input", all=False)
+ add_event_listeners(element, tracked_events)
+ assert element.property("value") == ""
+
+ response = element_clear(session, element)
+ assert_success(response)
+ assert element.property("value") == ""
+ assert_events_equal(session, [])
+
+
+@pytest.mark.parametrize("type,invalid_value",
+ [("number", "foo"),
+ ("range", "foo"),
+ ("email", "foo"),
+ ("url", "foo"),
+ ("color", "foo"),
+ ("date", "foo"),
+ ("datetime", "foo"),
+ ("datetime-local", "foo"),
+ ("time", "foo"),
+ ("month", "foo"),
+ ("week", "foo")])
+def test_resettable_element_does_not_satisfy_validation_constraints(session, inline, type, invalid_value):
+ """
+ Some UAs allow invalid input to certain types of constrained
+ form controls. For example, Gecko allows non-valid characters
+ to be typed into <input type=number> but Chrome does not.
+ Since we want to test that Element Clear works for clearing the
+ invalid characters in these UAs, it is fine to skip this test
+ where UAs do not allow the element to not satisfy its constraints.
+ """
+ session.url = inline("<input type=%s>" % type)
+ element = session.find.css("input", all=False)
+
+ def is_valid(element):
+ return session.execute_script("""
+ var input = arguments[0];
+ return input.validity.valid;
+ """, args=(element,))
+
+ # value property does not get updated if the input is invalid
+ element.send_keys(invalid_value)
+
+ # UA does not allow invalid input for this form control type
+ if is_valid(element):
+ return
+
+ response = element_clear(session, element)
+ assert_success(response)
+ assert is_valid(element)
+
+
+@pytest.mark.parametrize("type",
+ ["checkbox",
+ "radio",
+ "hidden",
+ "submit",
+ "button",
+ "image"])
+def test_non_editable_inputs(session, inline, type):
+ session.url = inline("<input type=%s>" % type)
+ element = session.find.css("input", all=False)
+
+ response = element_clear(session, element)
+ assert_error(response, "invalid element state")
+
+
+def test_scroll_into_view(session, inline):
+ session.url = inline("""
+ <input value=foobar>
+ <div style='height: 200vh; width: 5000vh'>
+ """)
+ element = session.find.css("input", all=False)
+ assert element.property("value") == "foobar"
+ assert session.execute_script("return window.pageYOffset") == 0
+
+ # scroll to the bottom right of the page
+ session.execute_script("""
+ var body = document.body;
+ window.scrollTo(body.scrollWidth, body.scrollHeight);
+ """)
+
+ # clear and scroll back to the top of the page
+ response = element_clear(session, element)
+ assert_success(response)
+ assert element.property("value") == ""
+
+ # check if element cleared is scrolled into view
+ rect = session.execute_script("""
+ var input = arguments[0];
+ var rect = input.getBoundingClientRect();
+ return {"top": rect.top,
+ "left": rect.left,
+ "height": rect.height,
+ "width": rect.width};
+ """, args=(element,))
+ window = session.execute_script("""
+ return {"innerHeight": window.innerHeight,
+ "innerWidth": window.innerWidth,
+ "pageXOffset": window.pageXOffset,
+ "pageYOffset": window.pageYOffset};
+ """)
+
+ assert rect["top"] < (window["innerHeight"] + window["pageYOffset"]) and \
+ rect["left"] < (window["innerWidth"] + window["pageXOffset"]) and \
+ (rect["top"] + element.rect["height"]) > window["pageYOffset"] and \
+ (rect["left"] + element.rect["width"]) > window["pageXOffset"]
diff --git a/testing/web-platform/tests/webdriver/tests/element_clear/user_prompts.py b/testing/web-platform/tests/webdriver/tests/element_clear/user_prompts.py
new file mode 100644
index 0000000000..7a8564a684
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_clear/user_prompts.py
@@ -0,0 +1,131 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def element_clear(session, element):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/clear".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<input type=text>")
+ element = session.find.css("input", all=False)
+ element.send_keys("foo")
+
+ assert element.property("value") == "foo"
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = element_clear(session, element)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert element.property("value") == ""
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<input type=text>")
+ element = session.find.css("input", all=False)
+ element.send_keys("foo")
+
+ assert element.property("value") == "foo"
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = element_clear(session, element)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert element.property("value") == "foo"
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<input type=text>")
+ element = session.find.css("input", all=False)
+ element.send_keys("foo")
+
+ assert element.property("value") == "foo"
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = element_clear(session, element)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert element.property("value") == "foo"
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/__init__.py b/testing/web-platform/tests/webdriver/tests/element_click/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/bubbling.py b/testing/web-platform/tests/webdriver/tests/element_click/bubbling.py
new file mode 100644
index 0000000000..7620ec3224
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/bubbling.py
@@ -0,0 +1,157 @@
+from tests.support.asserts import assert_success
+
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+def test_click_event_bubbles_to_parents(session, inline):
+ session.url = inline("""
+ <style>
+ body * {
+ margin: 10px;
+ padding: 10px;
+ border: 1px solid blue;
+ }
+ </style>
+
+ <div id=three>THREE
+ <div id=two>TWO
+ <div id=one>ONE</div>
+ </div>
+ </div>
+
+ <script>
+ window.clicks = [];
+
+ var elements = document.querySelectorAll("div");
+ for (var level = 0; level < elements.length; level++) {
+ elements[level].addEventListener("click", function(clickEvent) {
+ window.clicks.push(clickEvent.currentTarget);
+ });
+ }
+ </script>
+ """)
+ three, two, one = session.find.css("div")
+ one.click()
+
+ clicks = session.execute_script("return window.clicks")
+ assert one in clicks
+ assert two in clicks
+ assert three in clicks
+
+
+def test_spin_event_loop(session, inline):
+ """
+ Wait until the user agent event loop has spun enough times to
+ process the DOM events generated by clicking.
+ """
+ session.url = inline("""
+ <style>
+ body * {
+ margin: 10px;
+ padding: 10px;
+ border: 1px solid blue;
+ }
+ </style>
+
+ <div id=three>THREE
+ <div id=two>TWO
+ <div id=one>ONE</div>
+ </div>
+ </div>
+
+ <script>
+ window.delayedClicks = [];
+
+ var elements = document.querySelectorAll("div");
+ for (var level = 0; level < elements.length; level++) {
+ elements[level].addEventListener("click", function(clickEvent) {
+ var target = clickEvent.currentTarget;
+ setTimeout(function() { window.delayedClicks.push(target); }, 0);
+ });
+ }
+ </script>
+ """)
+ three, two, one = session.find.css("div")
+ one.click()
+
+ delayed_clicks = session.execute_script("return window.delayedClicks")
+ assert one in delayed_clicks
+ assert two in delayed_clicks
+ assert three in delayed_clicks
+
+
+def test_element_disappears_during_click(session, inline):
+ """
+ When an element in the event bubbling order disappears (its CSS
+ display style is set to "none") during a click, Gecko and Blink
+ exhibit different behaviour. Whilst Chrome fires a "click"
+ DOM event on <body>, Firefox does not.
+
+ A WebDriver implementation may choose to wait for this event to let
+ the event loops spin enough times to let click events propagate,
+ so this is a corner case test that Firefox does not hang indefinitely.
+ """
+ session.url = inline("""
+ <style>
+ #over,
+ #under {
+ position: absolute;
+ top: 8px;
+ left: 8px;
+ width: 100px;
+ height: 100px;
+ }
+
+ #over {
+ background: blue;
+ opacity: .5;
+ }
+ #under {
+ background: yellow;
+ }
+
+ #log {
+ margin-top: 120px;
+ }
+ </style>
+
+ <body id="body">
+ <div id=under></div>
+ <div id=over></div>
+
+ <div id=log></div>
+ </body>
+
+ <script>
+ let under = document.querySelector("#under");
+ let over = document.querySelector("#over");
+ let body = document.querySelector("body");
+ let log = document.querySelector("#log");
+
+ function logEvent({type, target, currentTarget}) {
+ log.innerHTML += "<p></p>";
+ log.lastElementChild.textContent =
+ `${type} in ${target.id} (handled by ${currentTarget.id})`;
+ }
+
+ for (let ev of ["click", "mousedown", "mouseup"]) {
+ under.addEventListener(ev, logEvent);
+ over.addEventListener(ev, logEvent);
+ body.addEventListener(ev, logEvent);
+ }
+
+ over.addEventListener("mousedown", function(mousedownEvent) {
+ over.style.display = "none";
+ });
+ </script>
+ """)
+ over = session.find.css("#over", all=False)
+
+ # should not time out
+ response = element_click(session, over)
+ assert_success(response)
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/center_point.py b/testing/web-platform/tests/webdriver/tests/element_click/center_point.py
new file mode 100644
index 0000000000..eb5cc19f14
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/center_point.py
@@ -0,0 +1,64 @@
+import pytest
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import center_point
+
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+def square(inline, size):
+ return inline("""
+ <style>
+ body {{ margin: 0 }}
+
+ div {{
+ background: blue;
+ width: {size}px;
+ height: {size}px;
+ }}
+ </style>
+
+ <div id=target></div>
+
+ <script>
+ window.clicks = [];
+ let div = document.querySelector("div");
+ div.addEventListener("click", function(e) {{ window.clicks.push([e.clientX, e.clientY]) }});
+ </script>
+ """.format(size=size))
+
+
+def assert_one_click(session):
+ """Asserts there has only been one click, and returns that."""
+ clicks = session.execute_script("return window.clicks")
+ assert len(clicks) == 1
+ return tuple(clicks[0])
+
+
+def test_entirely_in_view(session, inline):
+ session.url = square(inline, 300)
+ element = session.find.css("#target", all=False)
+
+ response = element_click(session, element)
+ assert_success(response)
+
+ click_point = assert_one_click(session)
+ assert click_point == (150, 150)
+
+
+@pytest.mark.parametrize("size", range(1, 11))
+def test_css_pixel_rounding(session, inline, size):
+ session.url = square(inline, size)
+ element = session.find.css("#target", all=False)
+ expected_click_point = center_point(element)
+
+ response = element_click(session, element)
+ assert_success(response)
+
+ actual_click_point = assert_one_click(session)
+ assert actual_click_point == expected_click_point
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/click.py b/testing/web-platform/tests/webdriver/tests/element_click/click.py
new file mode 100644
index 0000000000..d2015445d4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/click.py
@@ -0,0 +1,89 @@
+import pytest
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+def test_null_response_value(session, inline):
+ session.url = inline("<p>foo")
+ element = session.find.css("p", all=False)
+
+ response = element_click(session, element)
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ element = Element("foo", session)
+ response = element_click(session, element)
+ assert_error(response, "no such window")
+
+ original_handle, element = closed_window
+ response = element_click(session, element)
+ assert_error(response, "no such window")
+
+ session.window_handle = original_handle
+ response = element_click(session, element)
+ assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+ element = Element("foo", session)
+
+ response = element_click(session, element)
+ assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+ element = Element("foo", session)
+
+ response = element_click(session, element)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ response = element_click(session, element)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ response = element_click(session, button)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<div>", "div", as_frame=as_frame)
+
+ response = element_click(session, element)
+ assert_error(response, "stale element reference")
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/events.py b/testing/web-platform/tests/webdriver/tests/element_click/events.py
new file mode 100644
index 0000000000..e3d32c1bcb
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/events.py
@@ -0,0 +1,33 @@
+from webdriver import Element
+from tests.support.asserts import assert_success
+from tests.support.helpers import filter_dict
+
+def get_events(session):
+ """Return list of mouse events recorded in the fixture."""
+ return session.execute_script("return allEvents.events;") or []
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+def test_event_mousemove(session, url):
+ session.url = url("/webdriver/tests/element_click/support/test_click_wdspec.html")
+
+ element = session.find.css('#outer', all=False)
+ response = element_click(session, element)
+ assert_success(response)
+
+ events = get_events(session)
+ assert len(events) == 4
+
+ expected = [
+ {"type": "mousemove", "buttons": 0, "button": 0},
+ {"type": "mousedown", "buttons": 1, "button": 0},
+ {"type": "mouseup", "buttons": 0, "button": 0},
+ {"type": "click", "buttons": 0, "button": 0},
+ ]
+ filtered_events = [filter_dict(e, expected[0]) for e in events]
+
+ assert expected == filtered_events
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/file_upload.py b/testing/web-platform/tests/webdriver/tests/element_click/file_upload.py
new file mode 100644
index 0000000000..73832d0f85
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/file_upload.py
@@ -0,0 +1,16 @@
+from tests.support.asserts import assert_error, assert_success
+
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+def test_file_upload_state(session, inline):
+ session.url = inline("<input type=file>")
+
+ element = session.find.css("input", all=False)
+ response = element_click(session, element)
+ assert_error(response, "invalid argument")
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/interactability.py b/testing/web-platform/tests/webdriver/tests/element_click/interactability.py
new file mode 100644
index 0000000000..d55860c874
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/interactability.py
@@ -0,0 +1,130 @@
+import pytest
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+def test_display_none(session, inline):
+ session.url = inline("""<button style="display: none">foobar</button>""")
+ element = session.find.css("button", all=False)
+
+ response = element_click(session, element)
+ assert_error(response, "element not interactable")
+
+
+def test_visibility_hidden(session, inline):
+ session.url = inline("""<button style="visibility: hidden">foobar</button>""")
+ element = session.find.css("button", all=False)
+
+ response = element_click(session, element)
+ assert_error(response, "element not interactable")
+
+
+def test_hidden(session, inline):
+ session.url = inline("<button hidden>foobar</button>")
+ element = session.find.css("button", all=False)
+
+ response = element_click(session, element)
+ assert_error(response, "element not interactable")
+
+
+def test_disabled(session, inline):
+ session.url = inline("""<button disabled>foobar</button>""")
+ element = session.find.css("button", all=False)
+
+ response = element_click(session, element)
+ assert_success(response)
+
+
+@pytest.mark.parametrize("transform", ["translate(-100px, -100px)", "rotate(50deg)"])
+def test_element_not_interactable_css_transform(session, inline, transform):
+ session.url = inline("""
+ <div style="width: 500px; height: 100px;
+ background-color: blue; transform: {transform};">
+ <input type=button>
+ </div>""".format(transform=transform))
+ element = session.find.css("input", all=False)
+ response = element_click(session, element)
+ assert_error(response, "element not interactable")
+
+
+def test_element_not_interactable_out_of_view(session, inline):
+ session.url = inline("""
+ <style>
+ input {
+ position: absolute;
+ margin-top: -100vh;
+ background: red;
+ }
+ </style>
+
+ <input>
+ """)
+ element = session.find.css("input", all=False)
+ response = element_click(session, element)
+ assert_error(response, "element not interactable")
+
+
+@pytest.mark.parametrize("tag_name", ["div", "span"])
+def test_zero_sized_element(session, inline, tag_name):
+ session.url = inline("<{0}></{0}>".format(tag_name))
+ element = session.find.css(tag_name, all=False)
+
+ response = element_click(session, element)
+ assert_error(response, "element not interactable")
+
+
+def test_element_intercepted(session, inline):
+ session.url = inline("""
+ <style>
+ div {
+ position: absolute;
+ height: 100vh;
+ width: 100vh;
+ background: blue;
+ top: 0;
+ left: 0;
+ }
+ </style>
+
+ <input type=button value=Roger>
+ <div></div>
+ """)
+ element = session.find.css("input", all=False)
+ response = element_click(session, element)
+ assert_error(response, "element click intercepted")
+
+
+def test_element_intercepted_no_pointer_events(session, inline):
+ session.url = inline("""<input type=button value=Roger style="pointer-events: none">""")
+ element = session.find.css("input", all=False)
+ response = element_click(session, element)
+ assert_error(response, "element click intercepted")
+
+
+def test_element_not_visible_overflow_hidden(session, inline):
+ session.url = inline("""
+ <style>
+ div {
+ overflow: hidden;
+ height: 50px;
+ background: green;
+ }
+
+ input {
+ margin-top: 100px;
+ background: red;
+ }
+ </style>
+
+ <div><input></div>
+ """)
+ element = session.find.css("input", all=False)
+ response = element_click(session, element)
+ assert_error(response, "element not interactable")
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/navigate.py b/testing/web-platform/tests/webdriver/tests/element_click/navigate.py
new file mode 100644
index 0000000000..492718292a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/navigate.py
@@ -0,0 +1,197 @@
+import pytest
+
+from webdriver.error import NoSuchElementException
+
+from tests.support.asserts import assert_success
+from tests.support.helpers import wait_for_new_handle
+from tests.support.sync import Poll
+
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+def test_numbers_link(session, server_config, inline):
+ link = "/webdriver/tests/element_click/support/input.html"
+ session.url = inline("<a href={url}>123456</a>".format(url=link))
+ element = session.find.css("a", all=False)
+ response = element_click(session, element)
+ assert_success(response)
+ host = server_config["browser_host"]
+ port = server_config["ports"]["http"][0]
+
+ assert session.url == "http://{host}:{port}{url}".format(host=host, port=port, url=link)
+
+
+def test_multi_line_link(session, server_config, inline):
+ link = "/webdriver/tests/element_click/support/input.html"
+ session.url = inline("""
+ <p style="background-color: yellow; width: 50px;">
+ <a href={url}>Helloooooooooooooooooooo Worlddddddddddddddd</a>
+ </p>""".format(url=link))
+ element = session.find.css("a", all=False)
+ response = element_click(session, element)
+ assert_success(response)
+ host = server_config["browser_host"]
+ port = server_config["ports"]["http"][0]
+
+ assert session.url == "http://{host}:{port}{url}".format(host=host, port=port, url=link)
+
+
+def test_link_unload_event(session, server_config, inline):
+ link = "/webdriver/tests/element_click/support/input.html"
+ session.url = inline("""
+ <body onunload="checkUnload()">
+ <a href={url}>click here</a>
+ <input type=checkbox>
+ <script>
+ function checkUnload() {{
+ document.getElementsByTagName("input")[0].checked = true;
+ }}
+ </script>
+ </body>""".format(url=link))
+
+ element = session.find.css("a", all=False)
+ response = element_click(session, element)
+ assert_success(response)
+
+ host = server_config["browser_host"]
+ port = server_config["ports"]["http"][0]
+ assert session.url == "http://{host}:{port}{url}".format(host=host, port=port, url=link)
+
+ session.back()
+
+ element = session.find.css("input", all=False)
+ response = session.execute_script("""
+ let input = arguments[0];
+ return input.checked;
+ """, args=(element,))
+ assert response is True
+
+
+def test_link_hash(session, inline):
+ id = "anchor"
+ session.url = inline("""
+ <a href="#{url}">aaaa</a>
+ <p id={id} style="margin-top: 5000vh">scroll here</p>
+ """.format(url=id, id=id))
+ old_url = session.url
+
+ element = session.find.css("a", all=False)
+ response = element_click(session, element)
+ assert_success(response)
+
+ new_url = session.url
+ assert "{url}#{id}".format(url=old_url, id=id) == new_url
+
+ element = session.find.css("p", all=False)
+ assert session.execute_script("""
+ let input = arguments[0];
+ rect = input.getBoundingClientRect();
+ return rect["top"] >= 0 && rect["left"] >= 0 &&
+ (rect["top"] + rect["height"]) <= window.innerHeight &&
+ (rect["left"] + rect["width"]) <= window.innerWidth;
+ """, args=(element,)) is True
+
+
+@pytest.mark.parametrize("target", [
+ "",
+ "_blank",
+ "_parent",
+ "_self",
+ "_top",
+])
+def test_link_from_toplevel_context_with_target(session, inline, target):
+ target_page = inline("<p id='foo'>foo</p>")
+
+ session.url = inline("<a href='{}' target='{}'>click</a>".format(target_page, target))
+ element = session.find.css("a", all=False)
+
+ orig_handles = session.handles
+
+ response = element_click(session, element)
+ assert_success(response)
+
+ if target == "_blank":
+ session.window_handle = wait_for_new_handle(session, orig_handles)
+
+ wait = Poll(
+ session,
+ timeout=5,
+ ignored_exceptions=NoSuchElementException,
+ message="Expected element has not been found")
+ wait.until(lambda s: s.find.css("#foo"))
+
+
+@pytest.mark.parametrize("target", [
+ "",
+ "_blank",
+ "_parent",
+ "_self",
+ "_top",
+])
+def test_link_from_nested_context_with_target(session, inline, iframe, target):
+ target_page = inline("<p id='foo'>foo</p>")
+
+ session.url = inline(iframe("<a href='{}' target='{}'>click</a>".format(target_page, target)))
+ frame = session.find.css("iframe", all=False)
+ session.switch_frame(frame)
+            element = session.find.css("a", all=False)
+
+ orig_handles = session.handles
+
+ response = element_click(session, element)
+ assert_success(response)
+
+ if target == "_blank":
+ session.window_handle = wait_for_new_handle(session, orig_handles)
+
+ # With the current browsing context removed the navigation should
+ # not timeout. Switch to the target context, and wait until the expected
+ # element is available.
+ if target == "_parent":
+ session.switch_frame("parent")
+ elif target == "_top":
+ session.switch_frame(None)
+
+ wait = Poll(
+ session,
+ timeout=5,
+ ignored_exceptions=NoSuchElementException,
+ message="Expected element has not been found")
+ wait.until(lambda s: s.find.css("#foo"))
+
+
+# Capability needed as long as no valid certificate is available:
+# https://github.com/web-platform-tests/wpt/issues/28847
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+def test_link_cross_origin(session, inline, url):
+ base_path = ("/webdriver/tests/support/html/subframe.html" +
+                 "?pipe=header(Cross-Origin-Opener-Policy,same-origin)")
+ target_page = url(base_path, protocol="https", domain="alt")
+
+ session.url = inline("<a href='{}'>click me</a>".format(target_page), protocol="https")
+ link = session.find.css("a", all=False)
+
+ response = element_click(session, link)
+ assert_success(response)
+
+ assert session.url == target_page
+
+ session.find.css("#delete", all=False)
+
+
+def test_link_closes_window(session, inline):
+ new_handle = session.new_window()
+ session.window_handle = new_handle
+
+ session.url = inline("""<a href="javascript:window.close()">Close me</a>""")
+ element = session.find.css("a", all=False)
+
+ response = element_click(session, element)
+ assert_success(response)
+
+ assert new_handle not in session.handles
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/scroll_into_view.py b/testing/web-platform/tests/webdriver/tests/element_click/scroll_into_view.py
new file mode 100644
index 0000000000..c2dc648528
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/scroll_into_view.py
@@ -0,0 +1,72 @@
+import pytest
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import center_point
+
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+def assert_one_click(session):
+ """Asserts there has only been one click, and returns that."""
+ clicks = session.execute_script("return window.clicks")
+ assert len(clicks) == 1
+ return tuple(clicks[0])
+
+
+def test_scroll_into_view(session, inline):
+ session.url = inline("""
+ <input type=text value=Federer
+ style="position: absolute; left: 0vh; top: 500vh">""")
+
+ element = session.find.css("input", all=False)
+ response = element_click(session, element)
+ assert_success(response)
+
+ # Check if element clicked is scrolled into view
+ assert session.execute_script("""
+ let input = arguments[0];
+ rect = input.getBoundingClientRect();
+ return rect["top"] >= 0 && rect["left"] >= 0 &&
+ (rect["top"] + rect["height"]) <= window.innerHeight &&
+ (rect["left"] + rect["width"]) <= window.innerWidth;
+ """, args=(element,)) is True
+
+
+@pytest.mark.parametrize("offset", range(9, 0, -1))
+def test_partially_visible_does_not_scroll(session, offset, inline):
+ session.url = inline("""
+ <style>
+ body {{
+ margin: 0;
+ padding: 0;
+ }}
+
+ div {{
+ background: blue;
+ height: 200px;
+
+ /* make N pixels visible in the viewport */
+ margin-top: calc(100vh - {offset}px);
+ }}
+ </style>
+
+ <div></div>
+
+ <script>
+ window.clicks = [];
+ let target = document.querySelector("div");
+ target.addEventListener("click", function(e) {{ window.clicks.push([e.clientX, e.clientY]); }});
+ </script>
+ """.format(offset=offset))
+ target = session.find.css("div", all=False)
+ assert session.execute_script("return window.scrollY || document.documentElement.scrollTop") == 0
+ response = element_click(session, target)
+ assert_success(response)
+ assert session.execute_script("return window.scrollY || document.documentElement.scrollTop") == 0
+ click_point = assert_one_click(session)
+ assert click_point == center_point(target)
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/select.py b/testing/web-platform/tests/webdriver/tests/element_click/select.py
new file mode 100644
index 0000000000..62d40755b5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/select.py
@@ -0,0 +1,223 @@
+def test_click_option(session, inline):
+ session.url = inline("""
+ <select>
+ <option>first
+ <option>second
+ </select>""")
+ options = session.find.css("option")
+
+ assert options[0].selected
+ assert not options[1].selected
+
+ options[1].click()
+ assert options[1].selected
+ assert not options[0].selected
+
+
+def test_click_multiple_option(session, inline):
+ session.url = inline("""
+ <select multiple>
+ <option>first
+ <option>second
+ </select>""")
+ options = session.find.css("option")
+
+ assert not options[0].selected
+ assert not options[1].selected
+
+ options[0].click()
+ assert options[0].selected
+ assert not options[1].selected
+
+
+def test_click_preselected_option(session, inline):
+ session.url = inline("""
+ <select>
+ <option>first
+ <option selected>second
+ </select>""")
+ options = session.find.css("option")
+
+ assert not options[0].selected
+ assert options[1].selected
+
+ options[1].click()
+ assert options[1].selected
+ assert not options[0].selected
+
+ options[0].click()
+ assert options[0].selected
+ assert not options[1].selected
+
+
+def test_click_preselected_multiple_option(session, inline):
+ session.url = inline("""
+ <select multiple>
+ <option>first
+ <option selected>second
+ </select>""")
+ options = session.find.css("option")
+
+ assert not options[0].selected
+ assert options[1].selected
+
+ options[1].click()
+ assert not options[1].selected
+ assert not options[0].selected
+
+ options[0].click()
+ assert options[0].selected
+ assert not options[1].selected
+
+
+def test_click_deselects_others(session, inline):
+ session.url = inline("""
+ <select>
+ <option>first
+ <option>second
+ <option>third
+ </select>""")
+ options = session.find.css("option")
+
+ options[0].click()
+ assert options[0].selected
+ options[1].click()
+ assert options[1].selected
+ options[2].click()
+ assert options[2].selected
+ options[0].click()
+ assert options[0].selected
+
+
+def test_click_multiple_does_not_deselect_others(session, inline):
+ session.url = inline("""
+ <select multiple>
+ <option>first
+ <option>second
+ <option>third
+ </select>""")
+ options = session.find.css("option")
+
+ options[0].click()
+ assert options[0].selected
+ options[1].click()
+ assert options[0].selected
+ assert options[1].selected
+ options[2].click()
+ assert options[0].selected
+ assert options[1].selected
+ assert options[2].selected
+
+
+def test_click_selected_option(session, inline):
+ session.url = inline("""
+ <select>
+ <option>first
+ <option>second
+ </select>""")
+ options = session.find.css("option")
+
+ # First <option> is selected in dropdown
+ assert options[0].selected
+ assert not options[1].selected
+
+ options[1].click()
+ assert options[1].selected
+ options[1].click()
+ assert options[1].selected
+
+
+def test_click_selected_multiple_option(session, inline):
+ session.url = inline("""
+ <select multiple>
+ <option>first
+ <option>second
+ </select>""")
+ options = session.find.css("option")
+
+ # No implicitly selected <option> in <select multiple>
+ assert not options[0].selected
+ assert not options[1].selected
+
+ options[0].click()
+ assert options[0].selected
+ assert not options[1].selected
+
+ # Second click in <select multiple> deselects
+ options[0].click()
+ assert not options[0].selected
+ assert not options[1].selected
+
+
+def test_out_of_view_dropdown(session, inline):
+ session.url = inline("""
+ <select>
+ <option>1
+ <option>2
+ <option>3
+ <option>4
+ <option>5
+ <option>6
+ <option>7
+ <option>8
+ <option>9
+ <option>10
+ <option>11
+ <option>12
+ <option>13
+ <option>14
+ <option>15
+ <option>16
+ <option>17
+ <option>18
+ <option>19
+ <option>20
+ </select>""")
+ options = session.find.css("option")
+
+ options[14].click()
+ assert options[14].selected
+
+
+def test_out_of_view_multiple(session, inline):
+ session.url = inline("""
+ <select multiple>
+ <option>1
+ <option>2
+ <option>3
+ <option>4
+ <option>5
+ <option>6
+ <option>7
+ <option>8
+ <option>9
+ <option>10
+ <option>11
+ <option>12
+ <option>13
+ <option>14
+ <option>15
+ <option>16
+ <option>17
+ <option>18
+ <option>19
+ <option>20
+ </select>""")
+ options = session.find.css("option")
+
+ last_option = options[-1]
+ last_option.click()
+ assert last_option.selected
+
+
+def test_option_disabled(session, inline):
+ session.url = inline("""
+ <select>
+ <option disabled>foo
+ <option>bar
+ </select>""")
+ option = session.find.css("option", all=False)
+ assert not option.selected
+
+ option.click()
+ assert not option.selected
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/shadow_dom.py b/testing/web-platform/tests/webdriver/tests/element_click/shadow_dom.py
new file mode 100644
index 0000000000..45493b730d
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/shadow_dom.py
@@ -0,0 +1,101 @@
+import pytest
+from tests.support.asserts import assert_error, assert_success
+
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+@pytest.fixture
+def get_checkbox_dom(inline):
+ return inline("""
+ <style>
+ custom-checkbox-element {
+ display:block; width:20px; height:20px;
+ }
+ </style>
+ <custom-checkbox-element></custom-checkbox-element>
+ <script>
+ customElements.define('custom-checkbox-element',
+ class extends HTMLElement {
+ constructor() {
+ super();
+ this.attachShadow({mode: 'open'}).innerHTML = `
+ <div><input type="checkbox"/></div>
+ `;
+ }
+ });
+ </script>""")
+
+
+@pytest.mark.parametrize("click_on", ["custom_element", "checkbox_element"])
+def test_shadow_element_click(session, get_checkbox_dom, click_on):
+ session.url = get_checkbox_dom
+ custom_element = session.find.css("custom-checkbox-element", all=False)
+ checkbox_element = session.execute_script("return arguments[0].shadowRoot.querySelector('input')",
+ args=(custom_element,))
+ is_pre_checked = session.execute_script("return arguments[0].checked",
+ args=(checkbox_element,))
+ assert is_pre_checked == False
+ response = element_click(session, locals()[click_on])
+ assert_success(response)
+ is_post_checked = session.execute_script("return arguments[0].checked",
+ args=(checkbox_element,))
+ assert is_post_checked == True
+
+
+@pytest.fixture
+def get_nested_shadow_checkbox_dom(inline):
+ return inline("""
+ <style>
+ custom-nesting-element {
+ display:block; width:20px; height:20px;
+ }
+ </style>
+ <custom-nesting-element></custom-nesting-element>
+ <script>
+ customElements.define('custom-nesting-element',
+ class extends HTMLElement {
+ constructor() {
+ super();
+ this.attachShadow({mode: 'open'}).innerHTML = `
+ <style>
+ custom-checkbox-element {
+ display:block; width:20px; height:20px;
+ }
+ </style>
+ <div><custom-checkbox-element></custom-checkbox-element></div>
+ `;
+ }
+ });
+ customElements.define('custom-checkbox-element',
+ class extends HTMLElement {
+ constructor() {
+ super();
+ this.attachShadow({mode: 'open'}).innerHTML = `
+ <div><input type="checkbox"/></div>
+ `;
+ }
+ });
+ </script>""")
+
+
+@pytest.mark.parametrize("click_on", ["outer_element", "inner_element", "checkbox_element"])
+def test_nested_shadow_element_click(session, get_nested_shadow_checkbox_dom, click_on):
+ session.url = get_nested_shadow_checkbox_dom
+ outer_element = session.find.css("custom-nesting-element", all=False)
+ inner_element = session.execute_script("return arguments[0].shadowRoot.querySelector('custom-checkbox-element')",
+ args=(outer_element,))
+ checkbox_element = session.execute_script("return arguments[0].shadowRoot.querySelector('input')",
+ args=(inner_element,))
+ is_pre_checked = session.execute_script("return arguments[0].checked",
+ args=(checkbox_element,))
+ assert is_pre_checked == False
+ click_response = element_click(session, locals()[click_on])
+ assert_success(click_response)
+ is_post_checked = session.execute_script("return arguments[0].checked",
+ args=(checkbox_element,))
+ assert is_post_checked == True
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/support/input.html b/testing/web-platform/tests/webdriver/tests/element_click/support/input.html
new file mode 100644
index 0000000000..e2c6dadd12
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/support/input.html
@@ -0,0 +1,3 @@
+<html>
+ <input type=text value="Hello World">
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/support/test_click_wdspec.html b/testing/web-platform/tests/webdriver/tests/element_click/support/test_click_wdspec.html
new file mode 100644
index 0000000000..a9451ec82b
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/support/test_click_wdspec.html
@@ -0,0 +1,100 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+<head>
+ <title>Test Element Click</title>
+ <style>
+ div { padding:0px; margin: 0px; }
+ #trackPointer { position: fixed; }
+ #resultContainer { width: 600px; height: 60px; }
+ .area { width: 100px; height: 50px; background-color: #ccc; }
+ .block { width: 5px; height: 5px; border: solid 1px red; }
+ </style>
+ <script>
+ "use strict";
+ var els = {};
+ var allEvents = { events: [] };
+ function displayMessage(message) {
+ document.getElementById("events").innerHTML = "<p>" + message + "</p>";
+ }
+
+ function appendMessage(message) {
+ document.getElementById("events").innerHTML += "<p>" + message + "</p>";
+ }
+
+ function recordPointerEvent(event) {
+ if (event.type === "contextmenu") {
+ event.preventDefault();
+ }
+ allEvents.events.push({
+ "type": event.type,
+ "button": event.button,
+ "buttons": event.buttons,
+ "pageX": event.pageX,
+ "pageY": event.pageY,
+ "ctrlKey": event.ctrlKey,
+ "metaKey": event.metaKey,
+ "altKey": event.altKey,
+ "shiftKey": event.shiftKey,
+ "target": event.target.id
+ });
+ appendMessage(event.type + " " +
+ "button: " + event.button + ", " +
+ "pageX: " + event.pageX + ", " +
+ "pageY: " + event.pageY + ", " +
+ "button: " + event.button + ", " +
+ "buttons: " + event.buttons + ", " +
+ "ctrlKey: " + event.ctrlKey + ", " +
+ "altKey: " + event.altKey + ", " +
+ "metaKey: " + event.metaKey + ", " +
+ "shiftKey: " + event.shiftKey + ", " +
+ "target id: " + event.target.id);
+ }
+
+ function recordFirstPointerMove(event) {
+ recordPointerEvent(event);
+ window.removeEventListener("mousemove", recordFirstPointerMove);
+ }
+
+ function resetEvents() {
+ allEvents.events.length = 0;
+ displayMessage("");
+ }
+
+ function move(el, offsetX, offsetY, timeout) {
+ return function(event) {
+ setTimeout(function() {
+ el.style.top = event.clientY + offsetY + "px";
+ el.style.left = event.clientX + offsetX + "px";
+ }, timeout);
+ };
+ }
+
+ document.addEventListener("DOMContentLoaded", function() {
+ var outer = document.getElementById("outer");
+ window.addEventListener("mousemove", recordFirstPointerMove);
+ outer.addEventListener("click", recordPointerEvent);
+ outer.addEventListener("dblclick", recordPointerEvent);
+ outer.addEventListener("mousedown", recordPointerEvent);
+ outer.addEventListener("mouseup", recordPointerEvent);
+ outer.addEventListener("contextmenu", recordPointerEvent);
+
+ //visual cue for mousemove
+ var pointer = document.getElementById("trackPointer");
+ window.addEventListener("mousemove", move(pointer, 15, 15, 30));
+ });
+ </script>
+</head>
+<body>
+ <div id="trackPointer" class="block"></div>
+ <div>
+ <h2>ClickReporter</h2>
+ <div id="outer" class="area">
+ </div>
+ </div>
+ <div id="resultContainer">
+ <h2>Events</h2>
+ <div id="events"></div>
+ </div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/element_click/user_prompts.py b/testing/web-platform/tests/webdriver/tests/element_click/user_prompts.py
new file mode 100644
index 0000000000..140aceb3ce
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_click/user_prompts.py
@@ -0,0 +1,122 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def element_click(session, element):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/click".format(
+ session_id=session.session_id,
+ element_id=element.id))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<input type=text>")
+ element = session.find.css("input", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = element_click(session, element)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.active_element == element
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<input type=text>")
+ element = session.find.css("input", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = element_click(session, element)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.active_element != element
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<input type=text>")
+ element = session.find.css("input", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = element_click(session, element)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.active_element != element
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/__init__.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/__init__.py
new file mode 100644
index 0000000000..a7facf6fcf
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/__init__.py
@@ -0,0 +1,2 @@
+def map_files_to_multiline_text(files):
+ return "\n".join(map(lambda f: str(f), files))
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/conftest.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/conftest.py
new file mode 100644
index 0000000000..17bdd162a7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/conftest.py
@@ -0,0 +1,17 @@
+import pytest
+
+
+@pytest.fixture
+def create_files(tmpdir_factory):
+ def inner(filenames):
+ filelist = []
+ tmpdir = tmpdir_factory.mktemp("tmp")
+ for filename in filenames:
+ fh = tmpdir.join(filename)
+ fh.write(filename)
+ filelist.append(fh)
+
+ return filelist
+
+ inner.__name__ = "create_files"
+ return inner
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/content_editable.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/content_editable.py
new file mode 100644
index 0000000000..9db19d5b8a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/content_editable.py
@@ -0,0 +1,30 @@
+from tests.support.asserts import assert_element_has_focus
+
+
+def test_sets_insertion_point_to_end(session, inline):
+ session.url = inline('<div contenteditable=true>Hello,</div>')
+ body = session.find.css("body", all=False)
+ assert_element_has_focus(body)
+
+ input = session.find.css("div", all=False)
+ input.send_keys(' world!')
+ text = session.execute_script('return arguments[0].textContent', args=[input])
+ assert "Hello, world!" == text.strip()
+ assert_element_has_focus(input)
+
+
+def test_sets_insertion_point_to_after_last_text_node(session, inline):
+ session.url = inline('<div contenteditable=true>Hel<span>lo</span>,</div>')
+ input = session.find.css("div", all=False)
+ input.send_keys(" world!")
+ text = session.execute_script("return arguments[0].textContent", args=[input])
+ assert "Hello, world!" == text.strip()
+
+
+def test_no_move_caret_if_focused(session, inline):
+ session.url = inline("""<div contenteditable=true>Hel<span>lo</span>,</div>
+<script>document.getElementsByTagName("div")[0].focus()</script>""")
+ input = session.find.css("div", all=False)
+ input.send_keys("world!")
+ text = session.execute_script("return arguments[0].textContent", args=[input])
+ assert "world!Hello," == text.strip()
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/events.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/events.py
new file mode 100644
index 0000000000..4be1432bf3
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/events.py
@@ -0,0 +1,85 @@
+import pytest
+
+from tests.support.asserts import (
+ assert_element_has_focus,
+ assert_events_equal,
+ assert_success,
+)
+
+from . import map_files_to_multiline_text
+
+
+@pytest.fixture
+def tracked_events():
+ return [
+ "blur",
+ "change",
+ "focus",
+ "input",
+ "keydown",
+ "keypress",
+ "keyup",
+ ]
+
+
+def element_send_keys(session, element, text):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/value".format(
+ session_id=session.session_id,
+ element_id=element.id),
+ {"text": text})
+
+
+def test_file_upload(session, create_files, add_event_listeners, tracked_events, inline):
+ expected_events = [
+ "input",
+ "change",
+ ]
+
+ files = create_files(["foo", "bar"])
+
+ session.url = inline("<input type=file multiple>")
+ element = session.find.css("input", all=False)
+ add_event_listeners(element, tracked_events)
+
+ response = element_send_keys(session, element, map_files_to_multiline_text(files))
+ assert_success(response)
+
+ assert_events_equal(session, expected_events)
+
+
+@pytest.mark.parametrize("tag", ["input", "textarea"])
+def test_form_control_send_text(session, add_event_listeners, tracked_events, inline, tag):
+ expected_events = [
+ "focus",
+ "keydown",
+ "keypress",
+ "input",
+ "keyup",
+ "keydown",
+ "keypress",
+ "input",
+ "keyup",
+ "keydown",
+ "keypress",
+ "input",
+ "keyup",
+ ]
+
+ session.url = inline("<%s>" % tag)
+ element = session.find.css(tag, all=False)
+ add_event_listeners(element, tracked_events)
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+ assert_events_equal(session, expected_events)
+
+
+@pytest.mark.parametrize("tag", ["input", "textarea"])
+def test_not_blurred(session, inline, tag):
+ session.url = inline("<%s>" % tag)
+ element = session.find.css(tag, all=False)
+
+ response = element_send_keys(session, element, "")
+ assert_success(response)
+ assert_element_has_focus(element)
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/file_upload.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/file_upload.py
new file mode 100644
index 0000000000..f62a633c20
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/file_upload.py
@@ -0,0 +1,262 @@
+import pytest
+
+from tests.support.asserts import (assert_element_has_focus,
+ assert_error,
+ assert_files_uploaded,
+ assert_success)
+
+from . import map_files_to_multiline_text
+
+
+def element_send_keys(session, element, text):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/value".format(
+ session_id=session.session_id,
+ element_id=element.id),
+ {"text": text})
+
+
+def test_empty_text(session, inline):
+ session.url = inline("<input type=file>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "")
+ assert_error(response, "invalid argument")
+
+
+def test_multiple_files(session, create_files, inline):
+ files = create_files(["foo", "bar"])
+
+ session.url = inline("<input type=file multiple>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element,
+ map_files_to_multiline_text(files))
+ assert_success(response)
+
+ assert_files_uploaded(session, element, files)
+
+
+def test_multiple_files_last_path_not_found(session, create_files, inline):
+ files = create_files(["foo", "bar"])
+ files.append("foo bar")
+
+ session.url = inline("<input type=file multiple>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element,
+ map_files_to_multiline_text(files))
+ assert_error(response, "invalid argument")
+
+ assert_files_uploaded(session, element, [])
+
+
+def test_multiple_files_without_multiple_attribute(session, create_files, inline):
+ files = create_files(["foo", "bar"])
+
+ session.url = inline("<input type=file>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element,
+ map_files_to_multiline_text(files))
+ assert_error(response, "invalid argument")
+
+ assert_files_uploaded(session, element, [])
+
+
+def test_multiple_files_send_twice(session, create_files, inline):
+ first_files = create_files(["foo", "bar"])
+ second_files = create_files(["john", "doe"])
+
+ session.url = inline("<input type=file multiple>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element,
+ map_files_to_multiline_text(first_files))
+ assert_success(response)
+
+ response = element_send_keys(session, element,
+ map_files_to_multiline_text(second_files))
+ assert_success(response)
+
+ assert_files_uploaded(session, element, first_files + second_files)
+
+
+def test_multiple_files_reset_with_element_clear(session, create_files, inline):
+ first_files = create_files(["foo", "bar"])
+ second_files = create_files(["john", "doe"])
+
+ session.url = inline("<input type=file multiple>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element,
+ map_files_to_multiline_text(first_files))
+ assert_success(response)
+
+ # Reset already uploaded files
+ element.clear()
+ assert_files_uploaded(session, element, [])
+
+ response = element_send_keys(session, element,
+ map_files_to_multiline_text(second_files))
+ assert_success(response)
+
+ assert_files_uploaded(session, element, second_files)
+
+
+def test_single_file(session, create_files, inline):
+ files = create_files(["foo"])
+
+ session.url = inline("<input type=file>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+
+ assert_files_uploaded(session, element, files)
+
+
+def test_single_file_replaces_without_multiple_attribute(session, create_files, inline):
+ files = create_files(["foo", "bar"])
+
+ session.url = inline("<input type=file>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+
+ response = element_send_keys(session, element, str(files[1]))
+ assert_success(response)
+
+ assert_files_uploaded(session, element, [files[1]])
+
+
+def test_single_file_appends_with_multiple_attribute(session, create_files, inline):
+ files = create_files(["foo", "bar"])
+
+ session.url = inline("<input type=file multiple>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+
+ response = element_send_keys(session, element, str(files[1]))
+ assert_success(response)
+
+ assert_files_uploaded(session, element, files)
+
+
+def test_transparent(session, create_files, inline):
+ files = create_files(["foo"])
+ session.url = inline("""<input type=file style="opacity: 0">""")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+ assert_files_uploaded(session, element, files)
+
+
+def test_obscured(session, create_files, inline):
+ files = create_files(["foo"])
+ session.url = inline("""
+ <style>
+ div {
+ position: absolute;
+ width: 100vh;
+ height: 100vh;
+ background: blue;
+ top: 0;
+ left: 0;
+ }
+ </style>
+
+ <input type=file>
+ <div></div>
+ """)
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+ assert_files_uploaded(session, element, files)
+
+
+def test_outside_viewport(session, create_files, inline):
+ files = create_files(["foo"])
+ session.url = inline("""<input type=file style="margin-left: -100vh">""")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+ assert_files_uploaded(session, element, files)
+
+
+def test_hidden(session, create_files, inline):
+ files = create_files(["foo"])
+ session.url = inline("<input type=file hidden>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+ assert_files_uploaded(session, element, files)
+
+
+def test_display_none(session, create_files, inline):
+ files = create_files(["foo"])
+ session.url = inline("""<input type=file style="display: none">""")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+ assert_files_uploaded(session, element, files)
+
+
+@pytest.mark.capabilities({"strictFileInteractability": False})
+def test_not_focused(session, create_files, inline):
+ files = create_files(["foo"])
+
+ session.url = inline("<input type=file>")
+ body = session.find.css("body", all=False)
+ element = session.find.css("input", all=False)
+ assert_element_has_focus(body)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+ assert_element_has_focus(body)
+
+ assert_files_uploaded(session, element, files)
+
+
+@pytest.mark.capabilities({"strictFileInteractability": True})
+def test_focused(session, create_files, inline):
+ files = create_files(["foo"])
+
+ session.url = inline("<input type=file>")
+ body = session.find.css("body", all=False)
+ element = session.find.css("input", all=False)
+ assert_element_has_focus(body)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_success(response)
+ assert_element_has_focus(element)
+
+ assert_files_uploaded(session, element, files)
+
+
+@pytest.mark.capabilities({"strictFileInteractability": True})
+def test_strict_hidden(session, create_files, inline):
+ files = create_files(["foo"])
+ session.url = inline("<input type=file hidden>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_error(response, "element not interactable")
+
+
+@pytest.mark.capabilities({"strictFileInteractability": True})
+def test_strict_display_none(session, create_files, inline):
+ files = create_files(["foo"])
+ session.url = inline("""<input type=file style="display: none">""")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, str(files[0]))
+ assert_error(response, "element not interactable")
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/form_controls.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/form_controls.py
new file mode 100644
index 0000000000..364d4c28fa
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/form_controls.py
@@ -0,0 +1,102 @@
+from tests.support.asserts import assert_element_has_focus
+
+
+def element_send_keys(session, element, text):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/value".format(
+ session_id=session.session_id,
+ element_id=element.id),
+ {"text": text})
+
+
+def test_input(session, inline):
+ session.url = inline("<input>")
+ element = session.find.css("input", all=False)
+ assert element.property("value") == ""
+
+ element_send_keys(session, element, "foo")
+ assert element.property("value") == "foo"
+ assert_element_has_focus(element)
+
+
+def test_textarea(session, inline):
+ session.url = inline("<textarea>")
+ element = session.find.css("textarea", all=False)
+ assert element.property("value") == ""
+
+ element_send_keys(session, element, "foo")
+ assert element.property("value") == "foo"
+ assert_element_has_focus(element)
+
+
+def test_input_append(session, inline):
+ session.url = inline("<input value=a>")
+ body = session.find.css("body", all=False)
+ assert_element_has_focus(body)
+ element = session.find.css("input", all=False)
+ assert element.property("value") == "a"
+
+ element_send_keys(session, element, "b")
+ assert_element_has_focus(element)
+ assert element.property("value") == "ab"
+
+ element_send_keys(session, element, "c")
+ assert element.property("value") == "abc"
+
+
+def test_textarea_append(session, inline):
+ session.url = inline("<textarea>a</textarea>")
+ body = session.find.css("body", all=False)
+ assert_element_has_focus(body)
+ element = session.find.css("textarea", all=False)
+ assert element.property("value") == "a"
+
+ element_send_keys(session, element, "b")
+ assert_element_has_focus(element)
+ assert element.property("value") == "ab"
+
+ element_send_keys(session, element, "c")
+ assert element.property("value") == "abc"
+
+
+def test_input_insert_when_focused(session, inline):
+ session.url = inline("""<input value=a>
+<script>
+let elem = document.getElementsByTagName("input")[0];
+elem.focus();
+elem.setSelectionRange(0, 0);
+</script>""")
+ element = session.find.css("input", all=False)
+ assert element.property("value") == "a"
+
+ element_send_keys(session, element, "b")
+ assert element.property("value") == "ba"
+
+ element_send_keys(session, element, "c")
+ assert element.property("value") == "bca"
+
+
+def test_textarea_insert_when_focused(session, inline):
+ session.url = inline("""<textarea>a</textarea>
+<script>
+let elem = document.getElementsByTagName("textarea")[0];
+elem.focus();
+elem.setSelectionRange(0, 0);
+</script>""")
+ element = session.find.css("textarea", all=False)
+ assert element.property("value") == "a"
+
+ element_send_keys(session, element, "b")
+ assert element.property("value") == "ba"
+
+ element_send_keys(session, element, "c")
+ assert element.property("value") == "bca"
+
+
+def test_date(session, inline):
+ session.url = inline("<input type=date>")
+ element = session.find.css("input", all=False)
+
+ element_send_keys(session, element, "2000-01-01")
+ assert element.property("value") == "2000-01-01"
+ assert_element_has_focus(element)
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/interactability.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/interactability.py
new file mode 100644
index 0000000000..273843fb7b
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/interactability.py
@@ -0,0 +1,142 @@
+from tests.support.asserts import assert_error, assert_success
+
+
+def element_send_keys(session, element, text):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/value".format(
+ session_id=session.session_id,
+ element_id=element.id),
+ {"text": text})
+
+
+def test_body_is_interactable(session, inline):
+ session.url = inline("""
+ <body onkeypress="document.querySelector('input').value += event.key">
+ <input>
+ </body>
+ """)
+
+ element = session.find.css("body", all=False)
+ result = session.find.css("input", all=False)
+
+ # By default body is the active element
+ assert session.active_element == element
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+ assert session.active_element == element
+ assert result.property("value") == "foo"
+
+
+def test_document_element_is_interactable(session, inline):
+ session.url = inline("""
+ <html onkeypress="document.querySelector('input').value += event.key">
+ <input>
+ </html>
+ """)
+
+ body = session.find.css("body", all=False)
+ element = session.find.css(":root", all=False)
+ result = session.find.css("input", all=False)
+
+ # By default body is the active element
+ assert session.active_element == body
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+ assert session.active_element == element
+ assert result.property("value") == "foo"
+
+
+def test_iframe_is_interactable(session, inline, iframe):
+ session.url = inline(iframe("""
+ <body onkeypress="document.querySelector('input').value += event.key">
+ <input>
+ </body>
+ """))
+
+ body = session.find.css("body", all=False)
+ frame = session.find.css("iframe", all=False)
+
+ # By default the body has the focus
+ assert session.active_element == body
+
+ response = element_send_keys(session, frame, "foo")
+ assert_success(response)
+ assert session.active_element == frame
+
+ # Any key events are immediately routed to the nested
+ # browsing context's active document.
+ session.switch_frame(frame)
+ result = session.find.css("input", all=False)
+ assert result.property("value") == "foo"
+
+
+def test_transparent_element(session, inline):
+ session.url = inline("""<input style="opacity: 0">""")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+ assert element.property("value") == "foo"
+
+
+def test_readonly_element(session, inline):
+ session.url = inline("<input readonly>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+ assert element.property("value") == ""
+
+
+def test_obscured_element(session, inline):
+ session.url = inline("""
+ <input>
+ <div style="position: relative; top: -3em; height: 5em; background: blue;"></div>
+ """)
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+ assert element.property("value") == "foo"
+
+
+def test_not_a_focusable_element(session, inline):
+ session.url = inline("<div>foo</div>")
+ element = session.find.css("div", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "element not interactable")
+
+
+def test_display_none(session, inline):
+ session.url = inline("""<input style="display: none">""")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "element not interactable")
+
+
+def test_visibility_hidden(session, inline):
+ session.url = inline("""<input style="visibility: hidden">""")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "element not interactable")
+
+
+def test_hidden(session, inline):
+ session.url = inline("<input hidden>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "element not interactable")
+
+
+def test_disabled(session, inline):
+ session.url = inline("""<input disabled>""")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "element not interactable")
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/scroll_into_view.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/scroll_into_view.py
new file mode 100644
index 0000000000..7ccaeaf814
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/scroll_into_view.py
@@ -0,0 +1,40 @@
+from tests.support.asserts import assert_success
+from tests.support.helpers import is_element_in_viewport
+
+
+def element_send_keys(session, element, text):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/value".format(
+ session_id=session.session_id,
+ element_id=element.id),
+ {"text": text})
+
+
+def test_element_outside_of_not_scrollable_viewport(session, inline):
+ session.url = inline("<input style=\"position: relative; left: -9999px;\">")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+
+ assert not is_element_in_viewport(session, element)
+
+
+def test_element_outside_of_scrollable_viewport(session, inline):
+ session.url = inline("<input style=\"margin-top: 102vh;\">")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+
+ assert is_element_in_viewport(session, element)
+
+
+def test_contenteditable_element_outside_of_scrollable_viewport(session, inline):
+ session.url = inline("<div contenteditable style=\"margin-top: 102vh;\"></div>")
+ element = session.find.css("div", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+
+ assert is_element_in_viewport(session, element)
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/send_keys.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/send_keys.py
new file mode 100644
index 0000000000..36b27b92f9
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/send_keys.py
@@ -0,0 +1,111 @@
+import pytest
+
+from webdriver import Element
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def element_send_keys(session, element, text):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/value".format(
+ session_id=session.session_id,
+ element_id=element.id),
+ {"text": text})
+
+
+def test_null_parameter_value(session, http, inline):
+ session.url = inline("<input>")
+ element = session.find.css("input", all=False)
+
+ path = "/session/{session_id}/element/{element_id}/value".format(
+ session_id=session.session_id, element_id=element.id)
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_null_response_value(session, inline):
+ session.url = inline("<input>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, "foo")
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ element = Element("foo", session)
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "no such window")
+
+ original_handle, element = closed_window
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "no such window")
+
+ session.window_handle = original_handle
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+ element = Element("foo", session)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+ element = Element("foo", session)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ response = element_send_keys(session, button, "foo")
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<input>", "input", as_frame=as_frame)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "stale element reference")
+
+
+@pytest.mark.parametrize("value", [True, None, 1, [], {}])
+def test_invalid_text_type(session, inline, value):
+ session.url = inline("<input>")
+ element = session.find.css("input", all=False)
+
+ response = element_send_keys(session, element, value)
+ assert_error(response, "invalid argument")
diff --git a/testing/web-platform/tests/webdriver/tests/element_send_keys/user_prompts.py b/testing/web-platform/tests/webdriver/tests/element_send_keys/user_prompts.py
new file mode 100644
index 0000000000..c1046840fa
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/element_send_keys/user_prompts.py
@@ -0,0 +1,123 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def element_send_keys(session, element, text):
+ return session.transport.send(
+ "POST", "/session/{session_id}/element/{element_id}/value".format(
+ session_id=session.session_id,
+ element_id=element.id),
+ {"text": text})
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<input type=text>")
+ element = session.find.css("input", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = element_send_keys(session, element, "foo")
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert element.property("value") == "foo"
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<input type=text>")
+ element = session.find.css("input", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert element.property("value") == ""
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<input type=text>")
+ element = session.find.css("input", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = element_send_keys(session, element, "foo")
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert element.property("value") == ""
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/__init__.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/__init__.py
new file mode 100644
index 0000000000..9cd37ecdca
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/__init__.py
@@ -0,0 +1,16 @@
+import webdriver.protocol as protocol
+
+
+def execute_async_script(session, script, args=None):
+ if args is None:
+ args = []
+ body = {"script": script, "args": args}
+
+ return session.transport.send(
+ "POST",
+ "/session/{session_id}/execute/async".format(**vars(session)),
+ body,
+ encoder=protocol.Encoder,
+ decoder=protocol.Decoder,
+ session=session,
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/arguments.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/arguments.py
new file mode 100644
index 0000000000..9d281cd1a5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/arguments.py
@@ -0,0 +1,98 @@
+import pytest
+
+from webdriver.client import Element, Frame, ShadowRoot, Window
+
+from tests.support.asserts import assert_error, assert_success
+from . import execute_async_script
+
+
+def test_null(session):
+ value = None
+ result = execute_async_script(session, """
+ arguments[1]([arguments[0] === null, arguments[0]])
+ """, args=[value])
+ actual = assert_success(result)
+
+ assert actual[0] is True
+ assert actual[1] == value
+
+
+@pytest.mark.parametrize("value, expected_type", [
+ (True, "boolean"),
+ (42, "number"),
+ ("foo", "string"),
+], ids=["boolean", "number", "string"])
+def test_primitives(session, value, expected_type):
+ result = execute_async_script(session, """
+ arguments[1]([typeof arguments[0], arguments[0]])
+ """, args=[value])
+ actual = assert_success(result)
+
+ assert actual[0] == expected_type
+ assert actual[1] == value
+
+
+def test_collection(session):
+ value = [1, 2, 3]
+ result = execute_async_script(session, """
+ arguments[1]([Array.isArray(arguments[0]), arguments[0]])
+ """, args=[value])
+ actual = assert_success(result)
+
+ assert actual[0] is True
+ assert actual[1] == value
+
+
+def test_object(session):
+ value = {"foo": "bar", "cheese": 23}
+ result = execute_async_script(session, """
+ arguments[1]([typeof arguments[0], arguments[0]])
+ """, args=[value])
+ actual = assert_success(result)
+
+ assert actual[0] == "object"
+ assert actual[1] == value
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<div>", "div", as_frame=as_frame)
+
+ result = execute_async_script(session, "arguments[1](1);", args=[element])
+ assert_error(result, "stale element reference")
+
+
+@pytest.mark.parametrize("expression, expected_type, expected_class", [
+ ("window.frames[0]", Frame, "Frame"),
+ ("document.getElementById('foo')", Element, "HTMLDivElement"),
+ ("document.getElementById('checkbox').shadowRoot", ShadowRoot, "ShadowRoot"),
+ ("window", Window, "Window")
+], ids=["frame", "node", "shadow-root", "window"])
+def test_element_reference(session, iframe, inline, expression, expected_type, expected_class):
+ session.url = inline(f"""
+ <style>
+ custom-checkbox-element {{
+ display:block; width:20px; height:20px;
+ }}
+ </style>
+ <custom-checkbox-element id='checkbox'></custom-checkbox-element>
+ <script>
+ customElements.define('custom-checkbox-element',
+ class extends HTMLElement {{
+ constructor() {{
+ super();
+ this.attachShadow({{mode: 'open'}}).innerHTML = `
+ <div><input type="checkbox"/></div>
+ `;
+ }}
+ }});
+ </script>
+ <div id="foo"/>
+ {iframe("<p>")}""")
+
+ result = execute_async_script(session, f"arguments[0]({expression})")
+ reference = assert_success(result)
+ assert isinstance(reference, expected_type)
+
+ result = execute_async_script(session, "arguments[1](arguments[0].constructor.name)", [reference])
+ assert_success(result, expected_class)
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/collections.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/collections.py
new file mode 100644
index 0000000000..a9d9e63fea
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/collections.py
@@ -0,0 +1,150 @@
+import os
+
+from tests.support.asserts import assert_same_element, assert_success
+from . import execute_async_script
+
+
+def test_arguments(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ function func() {
+ return arguments;
+ }
+ resolve(func("foo", "bar"));
+ """)
+ assert_success(response, [u"foo", u"bar"])
+
+
+def test_array(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ resolve([1, 2]);
+ """)
+ assert_success(response, [1, 2])
+
+
+def test_file_list(session, tmpdir, inline):
+ files = [tmpdir.join("foo.txt"), tmpdir.join("bar.txt")]
+
+ session.url = inline("<input type=file multiple>")
+ upload = session.find.css("input", all=False)
+ for file in files:
+ file.write("morn morn")
+ upload.send_keys(str(file))
+
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ resolve(document.querySelector('input').files);
+ """)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == len(files)
+ for expected, actual in zip(files, value):
+ assert isinstance(actual, dict)
+ assert "name" in actual
+ assert isinstance(actual["name"], str)
+ assert os.path.basename(str(expected)) == actual["name"]
+
+
+def test_html_all_collection(session, inline):
+ session.url = inline("""
+ <p>foo
+ <p>bar
+ """)
+ html = session.find.css("html", all=False)
+ head = session.find.css("head", all=False)
+ meta = session.find.css("meta", all=False)
+ body = session.find.css("body", all=False)
+ ps = session.find.css("p")
+
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ resolve(document.all);
+ """)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ # <html>, <head>, <meta>, <body>, <p>, <p>
+ assert len(value) == 6
+
+ assert_same_element(session, html, value[0])
+ assert_same_element(session, head, value[1])
+ assert_same_element(session, meta, value[2])
+ assert_same_element(session, body, value[3])
+ assert_same_element(session, ps[0], value[4])
+ assert_same_element(session, ps[1], value[5])
+
+
+def test_html_collection(session, inline):
+ session.url = inline("""
+ <p>foo
+ <p>bar
+ """)
+ ps = session.find.css("p")
+
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ resolve(document.getElementsByTagName('p'));
+ """)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 2
+ for expected, actual in zip(ps, value):
+ assert_same_element(session, expected, actual)
+
+
+def test_html_form_controls_collection(session, inline):
+ session.url = inline("""
+ <form>
+ <input>
+ <input>
+ </form>
+ """)
+ inputs = session.find.css("input")
+
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ resolve(document.forms[0].elements);
+ """)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 2
+ for expected, actual in zip(inputs, value):
+ assert_same_element(session, expected, actual)
+
+
+def test_html_options_collection(session, inline):
+ session.url = inline("""
+ <select>
+ <option>
+ <option>
+ </select>
+ """)
+ options = session.find.css("option")
+
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ resolve(document.querySelector('select').options);
+ """)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 2
+ for expected, actual in zip(options, value):
+ assert_same_element(session, expected, actual)
+
+
+def test_node_list(session, inline):
+ session.url = inline("""
+ <p>foo
+ <p>bar
+ """)
+ ps = session.find.css("p")
+
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ resolve(document.querySelectorAll('p'));
+ """)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 2
+ for expected, actual in zip(ps, value):
+ assert_same_element(session, expected, actual)
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/cyclic.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/cyclic.py
new file mode 100644
index 0000000000..ff536f3477
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/cyclic.py
@@ -0,0 +1,78 @@
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+from . import execute_async_script
+
+
+def test_collection_self_reference(session):
+ response = execute_async_script(session, """
+ let arr = [];
+ arr.push(arr);
+ arguments[0](arr);
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_element_self_reference(session, inline):
+ session.url = inline("<div></div>")
+ div = session.find.css("div", all=False)
+
+ response = execute_async_script(session, """
+ let div = document.querySelector("div");
+ div.reference = div;
+ arguments[0](div);
+ """)
+ value = assert_success(response)
+ assert_same_element(session, value, div)
+
+
+def test_object_self_reference(session):
+ response = execute_async_script(session, """
+ let obj = {};
+ obj.reference = obj;
+ arguments[0](obj);
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_collection_self_reference_in_object(session):
+ response = execute_async_script(session, """
+ let arr = [];
+ arr.push(arr);
+ arguments[0]({'value': arr});
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_object_self_reference_in_collection(session):
+ response = execute_async_script(session, """
+ let obj = {};
+ obj.reference = obj;
+ arguments[0]([obj]);
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_element_self_reference_in_collection(session, inline):
+ session.url = inline("<div></div>")
+ divs = session.find.css("div")
+
+ response = execute_async_script(session, """
+ let div = document.querySelector("div");
+ div.reference = div;
+ arguments[0]([div]);
+ """)
+ value = assert_success(response)
+ for expected, actual in zip(divs, value):
+ assert_same_element(session, expected, actual)
+
+
+def test_element_self_reference_in_object(session, inline):
+ session.url = inline("<div></div>")
+ div = session.find.css("div", all=False)
+
+ response = execute_async_script(session, """
+ let div = document.querySelector("div");
+ div.reference = div;
+ arguments[0]({foo: div});
+ """)
+ value = assert_success(response)
+ assert_same_element(session, div, value["foo"])
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/execute_async.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/execute_async.py
new file mode 100644
index 0000000000..2292aecce1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/execute_async.py
@@ -0,0 +1,105 @@
+import pytest
+
+from webdriver import Element
+from webdriver.error import NoSuchAlertException
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.sync import Poll
+from . import execute_async_script
+
+
+def test_null_parameter_value(session, http):
+ path = "/session/{session_id}/execute/async".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_no_top_browsing_context(session, closed_window):
+    response = execute_async_script(session, "arguments[0](1);")
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+    response = execute_async_script(session, "arguments[0](1);")
+ assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+ element = Element("foo", session)
+
+ result = execute_async_script(session, """
+ arguments[1](true);
+ """, args=[element])
+ assert_error(result, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ result = execute_async_script(session, """
+ arguments[1](true);
+ """, args=[element])
+ assert_error(result, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ result = execute_async_script(session, """
+ arguments[1](true);
+ """, args=[button])
+ assert_error(result, "no such element")
+
+
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_abort_by_user_prompt(session, dialog_type):
+ response = execute_async_script(
+ session,
+ "window.{}('Hello'); arguments[0](1);".format(dialog_type))
+ assert_success(response, None)
+
+ session.alert.accept()
+
+
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_abort_by_user_prompt_twice(session, dialog_type):
+ response = execute_async_script(
+ session,
+ "window.{0}('Hello'); window.{0}('Bye'); arguments[0](1);".format(dialog_type))
+ assert_success(response, None)
+
+ session.alert.accept()
+
+ # The first alert has been accepted by the user prompt handler, the second
+ # alert will still be opened because the current step isn't aborted.
+ wait = Poll(
+ session,
+ timeout=5,
+ message="Second alert has not been opened",
+ ignored_exceptions=NoSuchAlertException
+ )
+ text = wait.until(lambda s: s.alert.text)
+
+ assert text == "Bye"
+
+ session.alert.accept()
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/node.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/node.py
new file mode 100644
index 0000000000..1b204df33e
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/node.py
@@ -0,0 +1,87 @@
+import pytest
+
+from webdriver.client import Element, Frame, ShadowRoot, Window
+
+from tests.support.asserts import assert_error, assert_success
+from . import execute_async_script
+
+
+PAGE_DATA = """
+ <div id="deep"><p><span></span></p><br/></div>
+ <div id="text-node"><p></p>Lorem</div>
+ <br/>
+ <svg id="foo"></svg>
+ <div id="comment"><!-- Comment --></div>
+ <script>
+ var svg = document.querySelector("svg");
+ svg.setAttributeNS("http://www.w3.org/2000/svg", "svg:foo", "bar");
+ </script>
+"""
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, iframe, inline, as_frame):
+ if as_frame:
+ session.url = inline(iframe("<div>"))
+ frame = session.find.css("iframe", all=False)
+ session.switch_frame(frame)
+ else:
+ session.url = inline("<div>")
+
+ element = session.find.css("div", all=False)
+
+ result = execute_async_script(session, """
+ const [elem, resolve] = arguments;
+ elem.remove();
+ resolve(elem);
+ """, args=[element])
+ assert_error(result, "stale element reference")
+
+
+@pytest.mark.parametrize("expression, expected_type", [
+ ("window.frames[0]", Frame),
+ ("document.getElementById('foo')", Element),
+ ("document.getElementById('checkbox').shadowRoot", ShadowRoot),
+ ("window", Window),
+], ids=["frame", "node", "shadow-root", "window"])
+def test_element_reference(session, iframe, inline, expression, expected_type):
+ session.url = inline(f"""
+ <style>
+ custom-checkbox-element {{
+ display:block; width:20px; height:20px;
+ }}
+ </style>
+ <custom-checkbox-element id='checkbox'></custom-checkbox-element>
+ <script>
+ customElements.define('custom-checkbox-element',
+ class extends HTMLElement {{
+ constructor() {{
+ super();
+ this.attachShadow({{mode: 'open'}}).innerHTML = `
+ <div><input type="checkbox"/></div>
+ `;
+ }}
+ }});
+ </script>
+ <div id="foo"/>
+ {iframe("<p>")}""")
+
+ result = execute_async_script(session, f"arguments[0]({expression})")
+ reference = assert_success(result)
+ assert isinstance(reference, expected_type)
+
+
+@pytest.mark.parametrize("expression", [
+ (""" document.querySelector("svg").attributes[0] """),
+ (""" document.querySelector("div#text-node").childNodes[1] """),
+ (""" document.querySelector("foo").childNodes[1] """),
+ (""" document.createProcessingInstruction("xml-stylesheet", "href='foo.css'") """),
+ (""" document.querySelector("div#comment").childNodes[0] """),
+ (""" document"""),
+ (""" document.doctype"""),
+], ids=["attribute", "text", "cdata", "processing_instruction", "comment", "document", "doctype"])
+def test_non_element_nodes(session, inline, expression):
+ session.url = inline(PAGE_DATA)
+
+ result = execute_async_script(session, f"arguments[0]({expression})")
+ assert_error(result, "javascript error")
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/objects.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/objects.py
new file mode 100644
index 0000000000..edcf06505a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/objects.py
@@ -0,0 +1,49 @@
+from tests.support.asserts import assert_error, assert_success
+from . import execute_async_script
+
+
+def test_object(session):
+ response = execute_async_script(session, """
+ arguments[0]({
+ foo: 23,
+ bar: true,
+ });
+ """)
+ value = assert_success(response)
+ assert value == {"foo": 23, "bar": True}
+
+
+def test_nested_object(session):
+ response = execute_async_script(session, """
+ arguments[0]({
+ foo: {
+ cheese: 23,
+ },
+ bar: true,
+ });
+ """)
+ value = assert_success(response)
+ assert value == {"foo": {"cheese": 23}, "bar": True}
+
+
+def test_object_to_json(session):
+ response = execute_async_script(session, """
+ arguments[0]({
+ toJSON() {
+ return ["foo", "bar"];
+ }
+ });
+ """)
+ value = assert_success(response)
+ assert value == ["foo", "bar"]
+
+
+def test_object_to_json_exception(session):
+ response = execute_async_script(session, """
+ arguments[0]({
+ toJSON() {
+ throw Error("fail");
+ }
+ });
+ """)
+    assert_error(response, "javascript error")
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/promise.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/promise.py
new file mode 100644
index 0000000000..d726d0d712
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/promise.py
@@ -0,0 +1,118 @@
+from tests.support.asserts import assert_error, assert_success
+from . import execute_async_script
+
+
+def test_promise_resolve(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ resolve(Promise.resolve('foobar'));
+ """)
+ assert_success(response, "foobar")
+
+
+def test_promise_resolve_delayed(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ let promise = new Promise(
+ (resolve) => setTimeout(
+ () => resolve('foobar'),
+ 50
+ )
+ );
+ resolve(promise);
+ """)
+ assert_success(response, "foobar")
+
+
+def test_promise_all_resolve(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ let promise = Promise.all([
+ Promise.resolve(1),
+ Promise.resolve(2)
+ ]);
+ resolve(promise);
+ """)
+ assert_success(response, [1, 2])
+
+
+def test_await_promise_resolve(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ let res = await Promise.resolve('foobar');
+ resolve(res);
+ """)
+ assert_success(response, "foobar")
+
+
+def test_promise_resolve_timeout(session):
+ session.timeouts.script = .1
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ let promise = new Promise(
+ (resolve) => setTimeout(
+ () => resolve(),
+ 1000
+ )
+ );
+ resolve(promise);
+ """)
+ assert_error(response, "script timeout")
+
+
+def test_promise_reject(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ resolve(Promise.reject(new Error('my error')));
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_promise_reject_delayed(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ let promise = new Promise(
+ (resolve, reject) => setTimeout(
+ () => reject(new Error('my error')),
+ 50
+ )
+ );
+ resolve(promise);
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_promise_all_reject(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ let promise = Promise.all([
+ Promise.resolve(1),
+ Promise.reject(new Error('error'))
+ ]);
+ resolve(promise);
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_await_promise_reject(session):
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ await Promise.reject(new Error('my error'));
+ resolve('foo');
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_promise_reject_timeout(session):
+ session.timeouts.script = .1
+ response = execute_async_script(session, """
+ let resolve = arguments[0];
+ let promise = new Promise(
+ (resolve, reject) => setTimeout(
+ () => reject(new Error('my error')),
+ 1000
+ )
+ );
+ resolve(promise);
+ """)
+ assert_error(response, "script timeout")
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/properties.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/properties.py
new file mode 100644
index 0000000000..b9592e7edd
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/properties.py
@@ -0,0 +1,64 @@
+from tests.support.asserts import assert_same_element, assert_success
+from . import execute_async_script
+
+
+def test_content_attribute(session, inline):
+ session.url = inline("<input value=foobar>")
+ response = execute_async_script(session, """
+ const resolve = arguments[0];
+ const input = document.querySelector("input");
+ resolve(input.value);
+ """)
+ assert_success(response, "foobar")
+
+
+def test_idl_attribute(session, inline):
+ session.url = inline("""
+ <input>
+ <script>
+ const input = document.querySelector("input");
+ input.value = "foobar";
+ </script>
+ """)
+ response = execute_async_script(session, """
+ const resolve = arguments[0];
+ const input = document.querySelector("input");
+ resolve(input.value);
+ """)
+ assert_success(response, "foobar")
+
+
+def test_idl_attribute_element(session, inline):
+ session.url = inline("""
+ <p>foo
+ <p>bar
+
+ <script>
+ const elements = document.querySelectorAll("p");
+ let foo = elements[0];
+ let bar = elements[1];
+ foo.bar = bar;
+ </script>
+ """)
+ _foo, bar = session.find.css("p")
+ response = execute_async_script(session, """
+ const resolve = arguments[0];
+ const foo = document.querySelector("p");
+ resolve(foo.bar);
+ """)
+ value = assert_success(response)
+ assert_same_element(session, bar, value)
+
+
+def test_script_defining_property(session, inline):
+ session.url = inline("<input>")
+ session.execute_script("""
+ const input = document.querySelector("input");
+ input.foobar = "foobar";
+ """)
+ response = execute_async_script(session, """
+ const resolve = arguments[0];
+ const input = document.querySelector("input");
+ resolve(input.foobar);
+ """)
+ assert_success(response, "foobar")
diff --git a/testing/web-platform/tests/webdriver/tests/execute_async_script/user_prompts.py b/testing/web-platform/tests/webdriver/tests/execute_async_script/user_prompts.py
new file mode 100644
index 0000000000..e39574fd4a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_async_script/user_prompts.py
@@ -0,0 +1,107 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+from . import execute_async_script
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = execute_async_script(session, "window.result = 1; arguments[0](1);")
+ assert_success(response, 1)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.execute_script("return window.result;") == 1
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = execute_async_script(session, "window.result = 1; arguments[0](1);")
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.execute_script("return window.result;") is None
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = execute_async_script(session, "window.result = 1; arguments[0](1);")
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.execute_script("return window.result;") is None
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/__init__.py b/testing/web-platform/tests/webdriver/tests/execute_script/__init__.py
new file mode 100644
index 0000000000..1ab36eb054
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/__init__.py
@@ -0,0 +1,16 @@
+import webdriver.protocol as protocol
+
+
+def execute_script(session, script, args=None):
+ if args is None:
+ args = []
+ body = {"script": script, "args": args}
+
+ return session.transport.send(
+ "POST",
+ "/session/{session_id}/execute/sync".format(**vars(session)),
+ body,
+ encoder=protocol.Encoder,
+ decoder=protocol.Decoder,
+ session=session,
+ )
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/arguments.py b/testing/web-platform/tests/webdriver/tests/execute_script/arguments.py
new file mode 100644
index 0000000000..5024eacf3f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/arguments.py
@@ -0,0 +1,91 @@
+import pytest
+
+from webdriver.client import Element, Frame, ShadowRoot, Window
+
+from tests.support.asserts import assert_error, assert_success
+from . import execute_script
+
+
+def test_null(session):
+ value = None
+ result = execute_script(session, "return [arguments[0] === null, arguments[0]]", args=[value])
+ actual = assert_success(result)
+
+ assert actual[0] is True
+ assert actual[1] == value
+
+
+@pytest.mark.parametrize("value, expected_type", [
+ (True, "boolean"),
+ (42, "number"),
+ ("foo", "string"),
+], ids=["boolean", "number", "string"])
+def test_primitives(session, value, expected_type):
+ result = execute_script(session, "return [typeof arguments[0], arguments[0]]", args=[value])
+ actual = assert_success(result)
+
+ assert actual[0] == expected_type
+ assert actual[1] == value
+
+
+def test_collection(session):
+ value = [1, 2, 3]
+ result = execute_script(session, "return [Array.isArray(arguments[0]), arguments[0]]", args=[value])
+ actual = assert_success(result)
+
+ assert actual[0] is True
+ assert actual[1] == value
+
+
+def test_object(session):
+ value = {"foo": "bar", "cheese": 23}
+ result = execute_script(session, "return [typeof arguments[0], arguments[0]]", args=[value])
+ actual = assert_success(result)
+
+ assert actual[0] == "object"
+ assert actual[1] == value
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<div>", "div", as_frame=as_frame)
+
+ result = execute_script(session, "return 1;", args=[element])
+ assert_error(result, "stale element reference")
+
+
+@pytest.mark.parametrize("expression, expected_type, expected_class", [
+ ("window.frames[0]", Frame, "Frame"),
+ ("document.getElementById('foo')", Element, "HTMLDivElement"),
+ ("document.getElementById('checkbox').shadowRoot", ShadowRoot, "ShadowRoot"),
+ ("window", Window, "Window")
+], ids=["frame", "node", "shadow-root", "window"])
+def test_element_reference(session, iframe, inline, expression, expected_type, expected_class):
+ session.url = inline(f"""
+ <style>
+ custom-checkbox-element {{
+ display:block; width:20px; height:20px;
+ }}
+ </style>
+ <custom-checkbox-element id='checkbox'></custom-checkbox-element>
+ <script>
+ customElements.define('custom-checkbox-element',
+ class extends HTMLElement {{
+ constructor() {{
+ super();
+ this.attachShadow({{mode: 'open'}}).innerHTML = `
+ <div><input type="checkbox"/></div>
+ `;
+ }}
+ }});
+ </script>
+ <div id="foo"/>
+ {iframe("<p>")}""")
+
+ result = execute_script(session, f"return {expression}")
+ reference = assert_success(result)
+ assert isinstance(reference, expected_type)
+
+ result = execute_script(session, "return arguments[0].constructor.name", [reference])
+ assert_success(result, expected_class)
+
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/collections.py b/testing/web-platform/tests/webdriver/tests/execute_script/collections.py
new file mode 100644
index 0000000000..3a6c2470c9
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/collections.py
@@ -0,0 +1,128 @@
+import os
+
+from tests.support.asserts import assert_same_element, assert_success
+from . import execute_script
+
+
+def test_arguments(session):
+ response = execute_script(session, """
+ function func() {
+ return arguments;
+ }
+ return func("foo", "bar");
+ """)
+ assert_success(response, [u"foo", u"bar"])
+
+
+def test_array(session):
+ response = execute_script(session, "return [1, 2]")
+ assert_success(response, [1, 2])
+
+
+def test_file_list(session, tmpdir, inline):
+ files = [tmpdir.join("foo.txt"), tmpdir.join("bar.txt")]
+
+ session.url = inline("<input type=file multiple>")
+ upload = session.find.css("input", all=False)
+ for file in files:
+ file.write("morn morn")
+ upload.send_keys(str(file))
+
+ response = execute_script(session, "return document.querySelector('input').files")
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == len(files)
+ for expected, actual in zip(files, value):
+ assert isinstance(actual, dict)
+ assert "name" in actual
+ assert isinstance(actual["name"], str)
+ assert os.path.basename(str(expected)) == actual["name"]
+
+
+def test_html_all_collection(session, inline):
+ session.url = inline("""
+ <p>foo
+ <p>bar
+ """)
+ html = session.find.css("html", all=False)
+ head = session.find.css("head", all=False)
+ meta = session.find.css("meta", all=False)
+ body = session.find.css("body", all=False)
+ ps = session.find.css("p")
+
+ response = execute_script(session, "return document.all")
+ value = assert_success(response)
+ assert isinstance(value, list)
+ # <html>, <head>, <meta>, <body>, <p>, <p>
+ assert len(value) == 6
+
+ assert_same_element(session, html, value[0])
+ assert_same_element(session, head, value[1])
+ assert_same_element(session, meta, value[2])
+ assert_same_element(session, body, value[3])
+ assert_same_element(session, ps[0], value[4])
+ assert_same_element(session, ps[1], value[5])
+
+
+def test_html_collection(session, inline):
+ session.url = inline("""
+ <p>foo
+ <p>bar
+ """)
+ ps = session.find.css("p")
+
+ response = execute_script(session, "return document.getElementsByTagName('p')")
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 2
+ for expected, actual in zip(ps, value):
+ assert_same_element(session, expected, actual)
+
+
+def test_html_form_controls_collection(session, inline):
+ session.url = inline("""
+ <form>
+ <input>
+ <input>
+ </form>
+ """)
+ inputs = session.find.css("input")
+
+ response = execute_script(session, "return document.forms[0].elements")
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 2
+ for expected, actual in zip(inputs, value):
+ assert_same_element(session, expected, actual)
+
+
+def test_html_options_collection(session, inline):
+ session.url = inline("""
+ <select>
+ <option>
+ <option>
+ </select>
+ """)
+ options = session.find.css("option")
+
+ response = execute_script(session, "return document.querySelector('select').options")
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 2
+ for expected, actual in zip(options, value):
+ assert_same_element(session, expected, actual)
+
+
+def test_node_list(session, inline):
+ session.url = inline("""
+ <p>foo
+ <p>bar
+ """)
+ ps = session.find.css("p")
+
+ response = execute_script(session, "return document.querySelectorAll('p')")
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 2
+ for expected, actual in zip(ps, value):
+ assert_same_element(session, expected, actual)
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/cyclic.py b/testing/web-platform/tests/webdriver/tests/execute_script/cyclic.py
new file mode 100644
index 0000000000..29db2f27e6
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/cyclic.py
@@ -0,0 +1,78 @@
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+from . import execute_script
+
+
+def test_collection_self_reference(session):
+ response = execute_script(session, """
+ let arr = [];
+ arr.push(arr);
+ return arr;
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_element_self_reference(session, inline):
+ session.url = inline("<div></div>")
+ div = session.find.css("div", all=False)
+
+ response = execute_script(session, """
+ let div = document.querySelector("div");
+ div.reference = div;
+ return div;
+ """)
+ value = assert_success(response)
+ assert_same_element(session, value, div)
+
+
+def test_object_self_reference(session):
+ response = execute_script(session, """
+ let obj = {};
+ obj.reference = obj;
+ return obj;
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_collection_self_reference_in_object(session):
+ response = execute_script(session, """
+ let arr = [];
+ arr.push(arr);
+ return {'value': arr};
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_object_self_reference_in_collection(session):
+ response = execute_script(session, """
+ let obj = {};
+ obj.reference = obj;
+ return [obj];
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_element_self_reference_in_collection(session, inline):
+ session.url = inline("<div></div>")
+ divs = session.find.css("div")
+
+ response = execute_script(session, """
+ let div = document.querySelector("div");
+ div.reference = div;
+ return [div];
+ """)
+ value = assert_success(response)
+ for expected, actual in zip(divs, value):
+ assert_same_element(session, expected, actual)
+
+
+def test_element_self_reference_in_object(session, inline):
+ session.url = inline("<div></div>")
+ div = session.find.css("div", all=False)
+
+ response = execute_script(session, """
+ let div = document.querySelector("div");
+ div.reference = div;
+ return {foo: div};
+ """)
+ value = assert_success(response)
+ assert_same_element(session, div, value["foo"])
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/execute.py b/testing/web-platform/tests/webdriver/tests/execute_script/execute.py
new file mode 100644
index 0000000000..8804daf59f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/execute.py
@@ -0,0 +1,134 @@
+import pytest
+
+from webdriver import Element
+from webdriver.error import NoSuchAlertException
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.sync import Poll
+from . import execute_script
+
+
+def test_null_parameter_value(session, http):
+ path = "/session/{session_id}/execute/sync".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = execute_script(session, "return 1;")
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = execute_script(session, "return 1;")
+ assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+ element = Element("foo", session)
+
+ result = execute_script(session, "return true;", args=[element])
+ assert_error(result, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ result = execute_script(session, "return true;", args=[element])
+ assert_error(result, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ result = execute_script(session, "return true;", args=[button])
+ assert_error(result, "no such element")
+
+
+def test_opening_new_window_keeps_current_window_handle(session, inline):
+ original_handle = session.window_handle
+ original_handles = session.handles
+
+ url = inline("""<a href="javascript:window.open();">open window</a>""")
+ session.url = url
+ session.find.css("a", all=False).click()
+ wait = Poll(
+ session,
+ timeout=5,
+ message="No new window has been opened")
+ new_handles = wait.until(lambda s: set(s.handles) - set(original_handles))
+
+ assert len(new_handles) == 1
+ assert session.window_handle == original_handle
+ assert session.url == url
+
+
+def test_ending_comment(session):
+ response = execute_script(session, "return 1; // foo")
+ assert_success(response, 1)
+
+
+def test_override_listeners(session, inline):
+ session.url = inline("""
+<script>
+called = [];
+window.addEventListener = () => {called.push("Internal addEventListener")}
+window.removeEventListener = () => {called.push("Internal removeEventListener")}
+</script>
+})""")
+ response = execute_script(session, "return !window.onunload")
+ assert_success(response, True)
+ response = execute_script(session, "return called")
+ assert_success(response, [])
+
+
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_abort_by_user_prompt(session, dialog_type):
+ response = execute_script(
+ session, "window.{}('Hello'); return 1;".format(dialog_type))
+ assert_success(response, None)
+
+ session.alert.accept()
+
+
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_abort_by_user_prompt_twice(session, dialog_type):
+ response = execute_script(
+ session, "window.{0}('Hello'); window.{0}('Bye'); return 1;".format(dialog_type))
+ assert_success(response, None)
+
+ session.alert.accept()
+
+ # The first alert has been accepted by the user prompt handler, the second
+ # alert will still be opened because the current step isn't aborted.
+ wait = Poll(
+ session,
+ timeout=5,
+ message="Second alert has not been opened",
+ ignored_exceptions=NoSuchAlertException
+ )
+ text = wait.until(lambda s: s.alert.text)
+
+ assert text == "Bye"
+
+ session.alert.accept()
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/json_serialize_windowproxy.py b/testing/web-platform/tests/webdriver/tests/execute_script/json_serialize_windowproxy.py
new file mode 100644
index 0000000000..8e76feda23
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/json_serialize_windowproxy.py
@@ -0,0 +1,51 @@
+import json
+
+from tests.support.asserts import assert_success
+from . import execute_script
+
+_window_id = "window-fcc6-11e5-b4f8-330a88ab9d7f"
+_frame_id = "frame-075b-4da1-b6ba-e579c2d3230a"
+
+
+def test_initial_window(session):
+ # non-auxiliary top-level browsing context
+ response = execute_script(session, "return window;")
+ raw_json = assert_success(response)
+
+ obj = json.loads(raw_json)
+ assert len(obj) == 1
+ assert _window_id in obj
+ handle = obj[_window_id]
+ assert handle in session.window_handles
+
+
+def test_window_open(session):
+ # auxiliary browsing context
+ session.execute_script("window.foo = window.open()")
+
+ response = execute_script(session, "return window.foo;")
+ raw_json = assert_success(response)
+
+ obj = json.loads(raw_json)
+ assert len(obj) == 1
+ assert _window_id in obj
+ handle = obj[_window_id]
+ assert handle in session.window_handles
+
+
+def test_frame(session):
+ # nested browsing context
+ append = """
+ window.frame = document.createElement('iframe');
+ document.body.appendChild(frame);
+ """
+ session.execute_script(append)
+
+ response = execute_script(session, "return frame.contentWindow;")
+ raw_json = assert_success(response)
+
+ obj = json.loads(raw_json)
+ assert len(obj) == 1
+ assert _frame_id in obj
+ handle = obj[_frame_id]
+ assert handle not in session.window_handles
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/node.py b/testing/web-platform/tests/webdriver/tests/execute_script/node.py
new file mode 100644
index 0000000000..83061222b0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/node.py
@@ -0,0 +1,86 @@
+import pytest
+
+from webdriver.client import Element, Frame, ShadowRoot, Window
+from tests.support.asserts import assert_error, assert_success
+from . import execute_script
+
+
+PAGE_DATA = """
+ <div id="deep"><p><span></span></p><br/></div>
+ <div id="text-node"><p></p>Lorem</div>
+ <br/>
+ <svg id="foo"></svg>
+ <div id="comment"><!-- Comment --></div>
+ <script>
+ var svg = document.querySelector("svg");
+ svg.setAttributeNS("http://www.w3.org/2000/svg", "svg:foo", "bar");
+ </script>
+"""
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, iframe, inline, as_frame):
+ if as_frame:
+ session.url = inline(iframe("<div>"))
+ frame = session.find.css("iframe", all=False)
+ session.switch_frame(frame)
+ else:
+ session.url = inline("<div>")
+
+ element = session.find.css("div", all=False)
+
+ result = execute_script(session, """
+ const elem = arguments[0];
+ elem.remove();
+ return elem;
+ """, args=[element])
+ assert_error(result, "stale element reference")
+
+
+@pytest.mark.parametrize("expression, expected_type", [
+ ("window.frames[0]", Frame),
+ ("document.getElementById('foo')", Element),
+ ("document.getElementById('checkbox').shadowRoot", ShadowRoot),
+ ("window", Window),
+], ids=["frame", "node", "shadow-root", "window"])
+def test_element_reference(session, iframe, inline, expression, expected_type):
+ session.url = inline(f"""
+ <style>
+ custom-checkbox-element {{
+ display:block; width:20px; height:20px;
+ }}
+ </style>
+ <custom-checkbox-element id='checkbox'></custom-checkbox-element>
+ <script>
+ customElements.define('custom-checkbox-element',
+ class extends HTMLElement {{
+ constructor() {{
+ super();
+ this.attachShadow({{mode: 'open'}}).innerHTML = `
+ <div><input type="checkbox"/></div>
+ `;
+ }}
+ }});
+ </script>
+ <div id="foo"/>
+ {iframe("<p>")}""")
+
+ result = execute_script(session, f"return {expression}")
+ reference = assert_success(result)
+ assert isinstance(reference, expected_type)
+
+
+@pytest.mark.parametrize("expression", [
+ (""" document.querySelector("svg").attributes[0] """),
+ (""" document.querySelector("div#text-node").childNodes[1] """),
+ (""" document.querySelector("foo").childNodes[1] """),
+ (""" document.createProcessingInstruction("xml-stylesheet", "href='foo.css'") """),
+ (""" document.querySelector("div#comment").childNodes[0] """),
+ (""" document"""),
+ (""" document.doctype"""),
+], ids=["attribute", "text", "cdata", "processing_instruction", "comment", "document", "doctype"])
+def test_non_element_nodes(session, inline, expression):
+ session.url = inline(PAGE_DATA)
+
+ result = execute_script(session, f"return {expression}")
+ assert_error(result, "javascript error")
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/objects.py b/testing/web-platform/tests/webdriver/tests/execute_script/objects.py
new file mode 100644
index 0000000000..e254fe275e
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/objects.py
@@ -0,0 +1,49 @@
+from tests.support.asserts import assert_error, assert_success
+from . import execute_script
+
+
+def test_object(session):
+ response = execute_script(session, """
+ return {
+ foo: 23,
+ bar: true,
+ };
+ """)
+ value = assert_success(response)
+ assert value == {"foo": 23, "bar": True}
+
+
+def test_nested_object(session):
+ response = execute_script(session, """
+ return {
+ foo: {
+ cheese: 23,
+ },
+ bar: true,
+ };
+ """)
+ value = assert_success(response)
+ assert value == {"foo": {"cheese": 23}, "bar": True}
+
+
+def test_object_to_json(session):
+ response = execute_script(session, """
+ return {
+ toJSON() {
+ return ["foo", "bar"];
+ }
+ };
+ """)
+ value = assert_success(response)
+ assert value == ["foo", "bar"]
+
+
+def test_object_to_json_exception(session):
+ response = execute_script(session, """
+ return {
+ toJSON() {
+ throw Error("fail");
+ }
+ };
+ """)
+ assert_error(response, "javascript error")
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/promise.py b/testing/web-platform/tests/webdriver/tests/execute_script/promise.py
new file mode 100644
index 0000000000..c206674bae
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/promise.py
@@ -0,0 +1,102 @@
+from tests.support.asserts import assert_error, assert_success
+from . import execute_script
+
+
+def test_promise_resolve(session):
+ response = execute_script(session, """
+ return Promise.resolve('foobar');
+ """)
+ assert_success(response, "foobar")
+
+
+def test_promise_resolve_delayed(session):
+ response = execute_script(session, """
+ return new Promise(
+ (resolve) => setTimeout(
+ () => resolve('foobar'),
+ 50
+ )
+ );
+ """)
+ assert_success(response, "foobar")
+
+
+def test_promise_all_resolve(session):
+ response = execute_script(session, """
+ return Promise.all([
+ Promise.resolve(1),
+ Promise.resolve(2)
+ ]);
+ """)
+ assert_success(response, [1, 2])
+
+
+def test_await_promise_resolve(session):
+ response = execute_script(session, """
+ let res = await Promise.resolve('foobar');
+ return res;
+ """)
+ assert_success(response, "foobar")
+
+
+def test_promise_resolve_timeout(session):
+ session.timeouts.script = .1
+ response = execute_script(session, """
+ return new Promise(
+ (resolve) => setTimeout(
+ () => resolve(),
+ 1000
+ )
+ );
+ """)
+ assert_error(response, "script timeout")
+
+
+def test_promise_reject(session):
+ response = execute_script(session, """
+ return Promise.reject(new Error('my error'));
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_promise_reject_delayed(session):
+ response = execute_script(session, """
+ return new Promise(
+ (resolve, reject) => setTimeout(
+ () => reject(new Error('my error')),
+ 50
+ )
+ );
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_promise_all_reject(session):
+ response = execute_script(session, """
+ return Promise.all([
+ Promise.resolve(1),
+ Promise.reject(new Error('error'))
+ ]);
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_await_promise_reject(session):
+ response = execute_script(session, """
+ await Promise.reject(new Error('my error'));
+ return 'foo';
+ """)
+ assert_error(response, "javascript error")
+
+
+def test_promise_reject_timeout(session):
+ session.timeouts.script = .1
+ response = execute_script(session, """
+ return new Promise(
+ (resolve, reject) => setTimeout(
+ () => reject(new Error('my error')),
+ 1000
+ )
+ );
+ """)
+ assert_error(response, "script timeout")
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/properties.py b/testing/web-platform/tests/webdriver/tests/execute_script/properties.py
new file mode 100644
index 0000000000..c3b01dea29
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/properties.py
@@ -0,0 +1,60 @@
+from tests.support.asserts import assert_same_element, assert_success
+from . import execute_script
+
+
+def test_content_attribute(session, inline):
+ session.url = inline("<input value=foobar>")
+ response = execute_script(session, """
+ const input = document.querySelector("input");
+ return input.value;
+ """)
+ assert_success(response, "foobar")
+
+
+def test_idl_attribute(session, inline):
+ session.url = inline("""
+ <input>
+ <script>
+ const input = document.querySelector("input");
+ input.value = "foobar";
+ </script>
+ """)
+ response = execute_script(session, """
+ const input = document.querySelector("input");
+ return input.value;
+ """)
+ assert_success(response, "foobar")
+
+
+def test_idl_attribute_element(session, inline):
+ session.url = inline("""
+ <p>foo
+ <p>bar
+
+ <script>
+ const elements = document.querySelectorAll("p");
+ let foo = elements[0];
+ let bar = elements[1];
+ foo.bar = bar;
+ </script>
+ """)
+ _foo, bar = session.find.css("p")
+ response = execute_script(session, """
+ const foo = document.querySelector("p");
+ return foo.bar;
+ """)
+ value = assert_success(response)
+ assert_same_element(session, bar, value)
+
+
+def test_script_defining_property(session, inline):
+ session.url = inline("<input>")
+ execute_script(session, """
+ const input = document.querySelector("input");
+ input.foobar = "foobar";
+ """)
+ response = execute_script(session, """
+ const input = document.querySelector("input");
+ return input.foobar;
+ """)
+ assert_success(response, "foobar")
diff --git a/testing/web-platform/tests/webdriver/tests/execute_script/user_prompts.py b/testing/web-platform/tests/webdriver/tests/execute_script/user_prompts.py
new file mode 100644
index 0000000000..48d735ea75
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/execute_script/user_prompts.py
@@ -0,0 +1,107 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+from . import execute_script
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = execute_script(session, "window.result = 1; return 1;")
+ assert_success(response, 1)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.execute_script("return window.result;") == 1
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = execute_script(session, "window.result = 1; return 1;")
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.execute_script("return window.result;") is None
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = execute_script(session, "window.result = 1; return 1;")
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.execute_script("return window.result;") is None
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/find_element/__init__.py b/testing/web-platform/tests/webdriver/tests/find_element/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/find_element/find.py b/testing/web-platform/tests/webdriver/tests/find_element/find.py
new file mode 100644
index 0000000000..d16372d025
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element/find.py
@@ -0,0 +1,142 @@
+import pytest
+
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+
+
def find_element(session, using, value):
    """Issue the WebDriver "Find Element" command over the session transport."""
    payload = {"using": using, "value": value}
    endpoint = f"session/{session.session_id}/element"
    return session.transport.send("POST", endpoint, payload)
+
+
def test_null_parameter_value(session, http):
    """A null request body must yield "invalid argument"."""
    path = "/session/{session_id}/element".format(**vars(session))
    with http.post(path, None) as response:
        assert_error(Response.from_http(response), "invalid argument")


def test_no_top_browsing_context(session, closed_window):
    """Finding fails with "no such window" when the top-level context is gone."""
    response = find_element(session, "css selector", "foo")
    assert_error(response, "no such window")


def test_no_browsing_context(session, closed_frame):
    """Finding fails with "no such window" when the current frame is gone."""
    response = find_element(session, "css selector", "foo")
    assert_error(response, "no such window")


@pytest.mark.parametrize(
    "value",
    ["#same1", "#frame", "#shadow"],
    ids=["not-existent", "existent-other-frame", "existent-shadow-dom"],
)
def test_no_such_element_with_invalid_value(session, iframe, inline, value):
    """Elements in other frames or shadow roots are not reachable.

    "#same1" matches nothing (the document only has id "same"); "#frame"
    only exists inside the iframe; "#shadow" only exists inside the custom
    element's shadow root.
    """
    session.url = inline(f"""
        <style>
            custom-checkbox-element {{
                display:block; width:20px; height:20px;
            }}
        </style>
        <script>
            customElements.define('custom-checkbox-element',
                class extends HTMLElement {{
                    constructor() {{
                        super();
                        this.attachShadow({{mode: 'open'}}).innerHTML = `
                            <div id="shadow"><input type="checkbox"/></div>
                        `;
                    }}
                }});
        </script>
        <div id="top">
            <div id="same"/>
            {iframe("<div id='frame'>")}
            <custom-checkbox-element id='checkbox'></custom-checkbox-element>
        </div>""")

    response = find_element(session, "css selector", value)
    assert_error(response, "no such element")


@pytest.mark.parametrize("using", ["a", True, None, 1, [], {}])
def test_invalid_using_argument(session, using):
    """Unknown location strategies must yield "invalid argument"."""
    response = find_element(session, using, "value")
    assert_error(response, "invalid argument")


@pytest.mark.parametrize("value", [None, [], {}])
def test_invalid_selector_argument(session, value):
    """Non-string selectors must yield "invalid argument"."""
    response = find_element(session, "css selector", value)
    assert_error(response, "invalid argument")


@pytest.mark.parametrize("using,value",
                         [("css selector", "#linkText"),
                          ("link text", "full link text"),
                          ("partial link text", "link text"),
                          ("tag name", "a"),
                          ("xpath", "//a")])
def test_find_element(session, inline, using, value):
    """Each location strategy can find a simple anchor element."""
    session.url = inline("<a href=# id=linkText>full link text</a>")

    response = find_element(session, using, value)
    assert_success(response)


@pytest.mark.parametrize("document,value", [
    ("<a href=#>link text</a>", "link text"),
    ("<a href=#>&nbsp;link text&nbsp;</a>", "link text"),
    ("<a href=#>link<br>text</a>", "link\ntext"),
    ("<a href=#>link&amp;text</a>", "link&text"),
    ("<a href=#>LINK TEXT</a>", "LINK TEXT"),
    ("<a href=# style='text-transform: uppercase'>link text</a>", "LINK TEXT"),
])
def test_find_element_link_text(session, inline, document, value):
    """Link text matches on rendered text: entities decoded, &nbsp; trimmed,
    <br> folded to a newline, and CSS text-transform applied."""
    session.url = inline(document)

    response = find_element(session, "link text", value)
    assert_success(response)


@pytest.mark.parametrize("document,value", [
    ("<a href=#>partial link text</a>", "link"),
    ("<a href=#>&nbsp;partial link text&nbsp;</a>", "link"),
    ("<a href=#>partial link text</a>", "k t"),
    ("<a href=#>partial link<br>text</a>", "k\nt"),
    ("<a href=#>partial link&amp;text</a>", "k&t"),
    ("<a href=#>PARTIAL LINK TEXT</a>", "LINK"),
    ("<a href=# style='text-transform: uppercase'>partial link text</a>", "LINK"),
])
def test_find_element_partial_link_text(session, inline, document, value):
    """Partial link text matches any substring of the rendered link text."""
    session.url = inline(document)

    response = find_element(session, "partial link text", value)
    assert_success(response)


@pytest.mark.parametrize("using,value",
                         [("css selector", "#linkText"),
                          ("link text", "full link text"),
                          ("partial link text", "link text"),
                          ("tag name", "a"),
                          ("xpath", "//*[name()='a']")])
def test_xhtml_namespace(session, inline, using, value):
    """All strategies work in an XHTML (namespaced) document."""
    session.url = inline("""<a href="#" id="linkText">full link text</a>""",
                         doctype="xhtml")
    expected = session.execute_script("return document.links[0]")

    response = find_element(session, using, value)
    value = assert_success(response)
    assert_same_element(session, value, expected)


@pytest.mark.parametrize("using,value",
                         [("css selector", ":root"),
                          ("tag name", "html"),
                          ("xpath", "/html")])
def test_htmldocument(session, inline, using, value):
    """The document root element itself can be located."""
    session.url = inline("")
    response = find_element(session, using, value)
    assert_success(response)
diff --git a/testing/web-platform/tests/webdriver/tests/find_element/user_prompts.py b/testing/web-platform/tests/webdriver/tests/find_element/user_prompts.py
new file mode 100644
index 0000000000..ada8e8ebee
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element/user_prompts.py
@@ -0,0 +1,120 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import (
+ assert_error,
+ assert_same_element,
+ assert_success,
+ assert_dialog_handled,
+)
+
+
def find_element(session, using, value):
    """Send a "Find Element" request and return the raw transport response."""
    return session.transport.send(
        "POST",
        f"session/{session.session_id}/element",
        {"using": using, "value": value},
    )
+
+
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, inline):
    """Checker: the prompt is auto-handled and Find Element still succeeds."""
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        session.url = inline("<p>bar</p>")
        element = session.find.css("p", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = find_element(session, "css selector", "p")
        value = assert_success(response)

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        assert_same_element(session, value, element)

    return check_user_prompt_closed_without_exception


@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, inline):
    """Checker: the prompt is closed but the command reports the open alert."""
    def check_user_prompt_closed_with_exception(dialog_type, retval):
        session.url = inline("<p>bar</p>")

        create_dialog(dialog_type, text=dialog_type)

        response = find_element(session, "css selector", "p")
        assert_error(response, "unexpected alert open")

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

    return check_user_prompt_closed_with_exception


@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
    """Checker: the prompt stays open and the command errors; dismiss at end."""
    def check_user_prompt_not_closed_but_exception(dialog_type):
        session.url = inline("<p>bar</p>")

        create_dialog(dialog_type, text=dialog_type)

        response = find_element(session, "css selector", "p")
        assert_error(response, "unexpected alert open")

        # The dialog must still be present; dismiss it so teardown is clean.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

    return check_user_prompt_not_closed_but_exception


@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "accept", the prompt is auto-accepted and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "accept and notify", the prompt is accepted but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "dismiss", the prompt is auto-dismissed and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "dismiss and notify", the prompt is closed but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    """With "ignore", the prompt stays open and the command errors."""
    check_user_prompt_not_closed_but_exception(dialog_type)


@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    """Default prompt handling behaves like "dismiss and notify"."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/find_element_from_element/__init__.py b/testing/web-platform/tests/webdriver/tests/find_element_from_element/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element_from_element/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/find_element_from_element/find.py b/testing/web-platform/tests/webdriver/tests/find_element_from_element/find.py
new file mode 100644
index 0000000000..811971aeae
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element_from_element/find.py
@@ -0,0 +1,191 @@
+import pytest
+
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+
+
def find_element(session, element_id, using, value):
    """Issue "Find Element From Element" for the given start element id."""
    endpoint = "session/{}/element/{}/element".format(session.session_id, element_id)
    payload = {"using": using, "value": value}
    return session.transport.send("POST", endpoint, payload)
+
+
def test_null_parameter_value(session, http, inline):
    """A null request body must yield "invalid argument"."""
    session.url = inline("<div><a href=# id=linkText>full link text</a></div>")
    element = session.find.css("div", all=False)

    path = "/session/{session_id}/element/{element_id}/element".format(
        session_id=session.session_id, element_id=element.id)
    with http.post(path, None) as response:
        assert_error(Response.from_http(response), "invalid argument")


def test_no_top_browsing_context(session, closed_window):
    """Finding fails with "no such window" when the top-level context is gone."""
    response = find_element(session, "notReal", "css selector", "foo")
    assert_error(response, "no such window")


def test_no_browsing_context(session, closed_frame):
    """Finding fails with "no such window" when the current frame is gone."""
    response = find_element(session, "notReal", "css selector", "foo")
    assert_error(response, "no such window")


@pytest.mark.parametrize(
    "value",
    ["#doesNotExist", "#frame", "#shadow"],
    ids=["not-existent", "existent-other-frame", "existent-shadow-dom"],
)
def test_no_such_element_with_invalid_value(session, iframe, inline, value):
    """Elements in other frames or shadow roots are not reachable from #top."""
    session.url = inline(f"""
        <style>
            custom-checkbox-element {{
                display:block; width:20px; height:20px;
            }}
        </style>
        <script>
            customElements.define('custom-checkbox-element',
                class extends HTMLElement {{
                    constructor() {{
                        super();
                        this.attachShadow({{mode: 'open'}}).innerHTML = `
                            <div id="shadow"><input type="checkbox"/></div>
                        `;
                    }}
                }});
        </script>
        <div id="top">
            <div id="same"/>
            {iframe("<div id='frame'>")}
            <custom-checkbox-element id='checkbox'></custom-checkbox-element>
        </div>""")

    from_element = session.find.css("#top", all=False)
    response = find_element(session, from_element.id, "css selector", value)
    assert_error(response, "no such element")


def test_no_such_element_with_startnode_from_other_window_handle(session, inline):
    """A start element from another window handle is not usable."""
    session.url = inline("<div id='parent'><p/>")
    from_element = session.find.css("#parent", all=False)

    new_handle = session.new_window()
    session.window_handle = new_handle

    response = find_element(session, from_element.id, "css selector", "p")
    assert_error(response, "no such element")


def test_no_such_element_with_startnode_from_other_frame(session, iframe, inline):
    """A start element from a child frame is not usable in the parent frame."""
    session.url = inline(iframe("<div id='parent'><p/>"))

    session.switch_frame(0)
    from_element = session.find.css("#parent", all=False)
    session.switch_frame("parent")

    response = find_element(session, from_element.id, "css selector", "p")
    assert_error(response, "no such element")


@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
def test_stale_element_reference(session, stale_element, as_frame):
    """A removed start element must yield "stale element reference"."""
    element = stale_element("<div><p>foo</p></div>", "div", as_frame=as_frame)

    response = find_element(session, element.id, "css selector", "p")
    assert_error(response, "stale element reference")


@pytest.mark.parametrize("using", ["a", True, None, 1, [], {}])
def test_invalid_using_argument(session, using):
    """Unknown location strategies must yield "invalid argument"."""
    response = find_element(session, "notReal", using, "value")
    assert_error(response, "invalid argument")


@pytest.mark.parametrize("value", [None, [], {}])
def test_invalid_selector_argument(session, value):
    """Non-string selectors must yield "invalid argument"."""
    response = find_element(session, "notReal", "css selector", value)
    assert_error(response, "invalid argument")


@pytest.mark.parametrize("using,value",
                         [("css selector", "#linkText"),
                          ("link text", "full link text"),
                          ("partial link text", "link text"),
                          ("tag name", "a"),
                          ("xpath", "//a")])
def test_find_element(session, inline, using, value):
    """Each location strategy can find a child of the start element."""
    session.url = inline("<div><a href=# id=linkText>full link text</a></div>")
    element = session.find.css("div", all=False)
    response = find_element(session, element.id, using, value)
    assert_success(response)


@pytest.mark.parametrize("document,value", [
    ("<a href=#>link text</a>", "link text"),
    ("<a href=#>&nbsp;link text&nbsp;</a>", "link text"),
    ("<a href=#>link<br>text</a>", "link\ntext"),
    ("<a href=#>link&amp;text</a>", "link&text"),
    ("<a href=#>LINK TEXT</a>", "LINK TEXT"),
    ("<a href=# style='text-transform: uppercase'>link text</a>", "LINK TEXT"),
])
def test_find_element_link_text(session, inline, document, value):
    """Link text matches on rendered text: entities decoded, &nbsp; trimmed,
    <br> folded to a newline, and CSS text-transform applied."""
    session.url = inline("<div>{0}</div>".format(document))
    element = session.find.css("div", all=False)

    response = find_element(session, element.id, "link text", value)
    assert_success(response)


@pytest.mark.parametrize("document,value", [
    ("<a href=#>partial link text</a>", "link"),
    ("<a href=#>&nbsp;partial link text&nbsp;</a>", "link"),
    ("<a href=#>partial link text</a>", "k t"),
    ("<a href=#>partial link<br>text</a>", "k\nt"),
    ("<a href=#>partial link&amp;text</a>", "k&t"),
    ("<a href=#>PARTIAL LINK TEXT</a>", "LINK"),
    ("<a href=# style='text-transform: uppercase'>partial link text</a>", "LINK"),
])
def test_find_element_partial_link_text(session, inline, document, value):
    """Partial link text matches any substring of the rendered link text."""
    session.url = inline("<div>{0}</div>".format(document))
    element = session.find.css("div", all=False)

    response = find_element(session, element.id, "partial link text", value)
    assert_success(response)


@pytest.mark.parametrize("using,value",
                         [("css selector", "#linkText"),
                          ("link text", "full link text"),
                          ("partial link text", "link text"),
                          ("tag name", "a"),
                          ("xpath", "//*[name()='a']")])
def test_xhtml_namespace(session, inline, using, value):
    """All strategies work in an XHTML (namespaced) document."""
    session.url = inline("""<p><a href="#" id="linkText">full link text</a></p>""",
                         doctype="xhtml")
    from_element = session.execute_script("""return document.querySelector("p")""")
    expected = session.execute_script("return document.links[0]")

    response = find_element(session, from_element.id, using, value)
    value = assert_success(response)
    assert_same_element(session, value, expected)


def test_parent_htmldocument(session, inline):
    """The xpath ".." step from <body> resolves to the root <html> element."""
    session.url = inline("")
    from_element = session.execute_script("""return document.querySelector("body")""")
    expected = session.execute_script("return document.documentElement")

    response = find_element(session, from_element.id, "xpath", "..")
    value = assert_success(response)
    assert_same_element(session, value, expected)


def test_parent_of_document_node_errors(session, inline):
    """The xpath ".." step from <html> selects the document node, which is
    not an element and must yield "invalid selector"."""
    session.url = inline("")
    from_element = session.execute_script("return document.documentElement")

    response = find_element(session, from_element.id, "xpath", "..")
    assert_error(response, "invalid selector")
diff --git a/testing/web-platform/tests/webdriver/tests/find_element_from_element/user_prompts.py b/testing/web-platform/tests/webdriver/tests/find_element_from_element/user_prompts.py
new file mode 100644
index 0000000000..0537a78618
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element_from_element/user_prompts.py
@@ -0,0 +1,125 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import (
+ assert_error,
+ assert_same_element,
+ assert_success,
+ assert_dialog_handled,
+)
+
+
def find_element(session, element_id, using, value):
    """Issue "Find Element From Element" and return the raw response."""
    return session.transport.send(
        "POST",
        f"session/{session.session_id}/element/{element_id}/element",
        {"using": using, "value": value},
    )
+
+
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, inline):
    """Checker: the prompt is auto-handled and the command still succeeds."""
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        # NOTE(review): the trailing "<div>" looks like a typo for "</div>";
        # harmless, as the parser nests the divs and the outer <div> is still
        # the first match. The sibling fixtures use the same markup.
        session.url = inline("<div><p>bar</p><div>")
        outer_element = session.find.css("div", all=False)
        inner_element = session.find.css("p", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = find_element(session, outer_element.id, "css selector", "p")
        value = assert_success(response)

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        assert_same_element(session, value, inner_element)

    return check_user_prompt_closed_without_exception


@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, inline):
    """Checker: the prompt is closed but the command reports the open alert."""
    def check_user_prompt_closed_with_exception(dialog_type, retval):
        session.url = inline("<div><p>bar</p><div>")
        outer_element = session.find.css("div", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = find_element(session, outer_element.id, "css selector", "p")
        assert_error(response, "unexpected alert open")

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

    return check_user_prompt_closed_with_exception


@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
    """Checker: the prompt stays open and the command errors; dismiss at end."""
    def check_user_prompt_not_closed_but_exception(dialog_type):
        session.url = inline("<div><p>bar</p><div>")
        outer_element = session.find.css("div", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = find_element(session, outer_element.id, "css selector", "p")
        assert_error(response, "unexpected alert open")

        # The dialog must still be present; dismiss it so teardown is clean.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

    return check_user_prompt_not_closed_but_exception


@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "accept", the prompt is auto-accepted and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "accept and notify", the prompt is accepted but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "dismiss", the prompt is auto-dismissed and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "dismiss and notify", the prompt is closed but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    """With "ignore", the prompt stays open and the command errors."""
    check_user_prompt_not_closed_but_exception(dialog_type)


@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    """Default prompt handling behaves like "dismiss and notify"."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/__init__.py b/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/conftest.py b/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/conftest.py
new file mode 100644
index 0000000000..5fd5f8a065
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/conftest.py
@@ -0,0 +1,19 @@
+import pytest
+
@pytest.fixture
def get_shadow_page():
    """Fixture returning a factory that wraps markup in an open shadow root
    attached to a <custom-shadow-element>."""
    def get_shadow_page(shadow_content):
        # NOTE(review): "{{ {0} }}" formats to literal braces around the
        # content inside the shadow root's innerHTML, adding stray "{"/"}"
        # text nodes. The inline shadow pages elsewhere in these tests omit
        # the braces — verify whether they are intentional here.
        return """
        <custom-shadow-element></custom-shadow-element>
        <script>
            customElements.define('custom-shadow-element',
                class extends HTMLElement {{
                    constructor() {{
                        super();
                        this.attachShadow({{mode: 'open'}}).innerHTML = `
                            {{ {0} }}
                        `;
                    }}
                }});
        </script>""".format(shadow_content)
    return get_shadow_page
diff --git a/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/find.py b/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/find.py
new file mode 100644
index 0000000000..833291da72
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/find.py
@@ -0,0 +1,193 @@
+import pytest
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+
+
def find_element(session, shadow_id, using, value):
    """Issue "Find Element From Shadow Root" for the given shadow root id."""
    endpoint = f"session/{session.session_id}/shadow/{shadow_id}/element"
    return session.transport.send("POST", endpoint, {"using": using, "value": value})
+
+
def test_null_parameter_value(session, http, inline, get_shadow_page):
    """A null request body must yield "invalid argument"."""
    session.url = inline(get_shadow_page("<div><a href=# id=linkText>full link text</a></div>"))
    custom_element = session.find.css("custom-shadow-element", all=False)
    shadow_root = custom_element.shadow_root

    path = "/session/{session_id}/shadow/{shadow_id}/element".format(
        session_id=session.session_id, shadow_id=shadow_root.id)
    with http.post(path, None) as response:
        assert_error(Response.from_http(response), "invalid argument")


def test_no_top_browsing_context(session, closed_window):
    """Finding fails with "no such window" when the top-level context is gone."""
    response = find_element(session, "notReal", "css selector", "foo")
    assert_error(response, "no such window")


def test_no_browsing_context(session, closed_frame):
    """Finding fails with "no such window" when the current frame is gone."""
    response = find_element(session, "notReal", "css selector", "foo")
    assert_error(response, "no such window")


def test_no_such_element_with_unknown_shadow_root(session, inline, get_shadow_page):
    """A shadow root id from a previous document yields "no such element"."""
    session.url = inline(get_shadow_page("<div><input type='checkbox'/></div>"))
    custom_element = session.find.css("custom-shadow-element", all=False)
    shadow_root = custom_element.shadow_root

    # Navigate away so the captured shadow root no longer exists.
    session.url = inline("<p>")

    result = find_element(session, shadow_root.id, "css selector", "input")
    assert_error(result, "no such element")


@pytest.mark.parametrize(
    "value",
    ["#doesNotExist", "#inner"],
    ids=["not-existent", "existent-inner-shadow-dom"],
)
def test_no_such_element_with_invalid_value(
    session, iframe, inline, get_shadow_page, value
):
    """Elements inside a nested shadow root are not reachable from the
    outer shadow root."""
    # NOTE(review): the iframe fixture is requested but unused here.
    session.url = inline(get_shadow_page(f"""
        <div id="outer"/>
        {get_shadow_page("<div id='inner'>")}
    """))

    custom_element = session.find.css("custom-shadow-element", all=False)
    shadow_root = custom_element.shadow_root

    response = find_element(session, shadow_root.id, "css selector", value)
    assert_error(response, "no such element")


def test_no_such_element_with_shadow_root_from_other_window_handle(
    session, inline, get_shadow_page
):
    """A shadow root from another window handle is not usable."""
    session.url = inline(get_shadow_page("<div>"))
    custom_element = session.find.css("custom-shadow-element", all=False)
    shadow_root = custom_element.shadow_root

    new_handle = session.new_window()
    session.window_handle = new_handle

    response = find_element(session, shadow_root.id, "css selector", "div")
    assert_error(response, "no such element")


def test_no_such_element_with_shadow_root_from_other_frame(
    session, iframe, inline, get_shadow_page
):
    """A shadow root from a child frame is not usable in the parent frame."""
    session.url = inline(iframe(get_shadow_page("<div>")))

    session.switch_frame(0)
    custom_element = session.find.css("custom-shadow-element", all=False)
    shadow_root = custom_element.shadow_root
    session.switch_frame("parent")

    response = find_element(session, shadow_root.id, "css selector", "div")
    assert_error(response, "no such element")


@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
def test_detached_shadow_root(session, iframe, inline, get_shadow_page, as_frame):
    """Removing the host element must yield "detached shadow root"."""
    page = get_shadow_page("<div><input type='checkbox'/></div>")

    if as_frame:
        session.url = inline(iframe(page))
        frame = session.find.css("iframe", all=False)
        session.switch_frame(frame)
    else:
        session.url = inline(page)

    custom_element = session.find.css("custom-shadow-element", all=False)
    shadow_root = custom_element.shadow_root

    session.execute_script("arguments[0].remove();", args=[custom_element])

    response = find_element(session, shadow_root.id, "css selector", "input")
    assert_error(response, "detached shadow root")


@pytest.mark.parametrize("using", ["a", True, None, 1, [], {}])
def test_invalid_using_argument(session, using):
    """Unknown location strategies must yield "invalid argument"."""
    response = find_element(session, "notReal", using, "value")
    assert_error(response, "invalid argument")


@pytest.mark.parametrize("value", [None, [], {}])
def test_invalid_selector_argument(session, value):
    """Non-string selectors must yield "invalid argument"."""
    response = find_element(session, "notReal", "css selector", value)
    assert_error(response, "invalid argument")


def test_found_element_equivalence(session, inline, get_shadow_page):
    """The found element equals the one returned by shadowRoot.querySelector."""
    session.url = inline(get_shadow_page("<div><input type='checkbox'/></div>"))
    custom_element = session.find.css("custom-shadow-element", all=False)
    expected = session.execute_script("return arguments[0].shadowRoot.querySelector('input')",
                                      args=(custom_element,))
    shadow_root = custom_element.shadow_root
    response = find_element(session, shadow_root.id, "css selector", "input")
    value = assert_success(response)
    assert_same_element(session, value, expected)


@pytest.mark.parametrize("using,value",
                         [("css selector", "#linkText"),
                          ("link text", "full link text"),
                          ("partial link text", "link text"),
                          ("tag name", "a"),
                          ("xpath", "//a")])
def test_find_element(session, inline, get_shadow_page, using, value):
    """Each location strategy can find an element inside the shadow root."""
    session.url = inline(get_shadow_page("<div><a href=# id=linkText>full link text</a></div>"))
    custom_element = session.find.css("custom-shadow-element", all=False)
    expected = session.execute_script("return arguments[0].shadowRoot.querySelector('#linkText')",
                                      args=(custom_element,))
    shadow_root = custom_element.shadow_root
    response = find_element(session, shadow_root.id, using, value)
    value = assert_success(response)
    assert_same_element(session, value, expected)


@pytest.mark.parametrize("document,value", [
    ("<a href=#>link text</a>", "link text"),
    ("<a href=#>&nbsp;link text&nbsp;</a>", "link text"),
    ("<a href=#>link<br>text</a>", "link\ntext"),
    ("<a href=#>link&amp;text</a>", "link&text"),
    ("<a href=#>LINK TEXT</a>", "LINK TEXT"),
    ("<a href=# style='text-transform: uppercase'>link text</a>", "LINK TEXT"),
])
def test_find_element_link_text(session, inline, get_shadow_page, document, value):
    """Link text matching works for anchors inside a shadow root."""
    session.url = inline(get_shadow_page("<div>{0}</div>".format(document)))
    custom_element = session.find.css("custom-shadow-element", all=False)
    expected = session.execute_script("return arguments[0].shadowRoot.querySelectorAll('a')[0]",
                                      args=(custom_element,))
    shadow_root = custom_element.shadow_root

    response = find_element(session, shadow_root.id, "link text", value)
    value = assert_success(response)
    assert_same_element(session, value, expected)


@pytest.mark.parametrize("document,value", [
    ("<a href=#>partial link text</a>", "link"),
    ("<a href=#>&nbsp;partial link text&nbsp;</a>", "link"),
    ("<a href=#>partial link text</a>", "k t"),
    ("<a href=#>partial link<br>text</a>", "k\nt"),
    ("<a href=#>partial link&amp;text</a>", "k&t"),
    ("<a href=#>PARTIAL LINK TEXT</a>", "LINK"),
    ("<a href=# style='text-transform: uppercase'>partial link text</a>", "LINK"),
])
def test_find_element_partial_link_text(session, inline, get_shadow_page, document, value):
    """Partial link text matching works for anchors inside a shadow root."""
    session.url = inline(get_shadow_page("<div>{0}</div>".format(document)))
    custom_element = session.find.css("custom-shadow-element", all=False)
    expected = session.execute_script("return arguments[0].shadowRoot.querySelectorAll('a')[0]",
                                      args=(custom_element,))
    shadow_root = custom_element.shadow_root

    response = find_element(session, shadow_root.id, "partial link text", value)
    value = assert_success(response)
    assert_same_element(session, value, expected)
diff --git a/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/user_prompts.py b/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/user_prompts.py
new file mode 100644
index 0000000000..ee79ed79f4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_element_from_shadow_root/user_prompts.py
@@ -0,0 +1,129 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import (
+ assert_error,
+ assert_same_element,
+ assert_success,
+ assert_dialog_handled,
+)
+
+
+def find_element(session, shadow_id, using, value):
+ return session.transport.send(
+ "POST", "session/{session_id}/shadow/{shadow_id}/element".format(
+ session_id=session.session_id,
+ shadow_id=shadow_id),
+ {"using": using, "value": value})
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline, get_shadow_page):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+        session.url = inline(get_shadow_page("<div><p>bar</p></div>"))
+ outer_element = session.find.css("custom-shadow-element", all=False)
+ shadow_root = outer_element.shadow_root
+ inner_element = session.execute_script("return arguments[0].shadowRoot.querySelector('p')",
+ args=(outer_element,))
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = find_element(session, shadow_root.id, "css selector", "p")
+ value = assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert_same_element(session, value, inner_element)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline, get_shadow_page):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+        session.url = inline(get_shadow_page("<div><p>bar</p></div>"))
+ outer_element = session.find.css("custom-shadow-element", all=False)
+ shadow_root = outer_element.shadow_root
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = find_element(session, shadow_root.id, "css selector", "p")
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline, get_shadow_page):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+        session.url = inline(get_shadow_page("<div><p>bar</p></div>"))
+ outer_element = session.find.css("custom-shadow-element", all=False)
+ shadow_root = outer_element.shadow_root
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = find_element(session, shadow_root.id, "css selector", "p")
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements/__init__.py b/testing/web-platform/tests/webdriver/tests/find_elements/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements/find.py b/testing/web-platform/tests/webdriver/tests/find_elements/find.py
new file mode 100644
index 0000000000..6f1283bf35
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements/find.py
@@ -0,0 +1,162 @@
+import pytest
+
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+
+
+def find_elements(session, using, value):
+ return session.transport.send(
+ "POST", "session/{session_id}/elements".format(**vars(session)),
+ {"using": using, "value": value})
+
+
+def test_null_parameter_value(session, http):
+ path = "/session/{session_id}/elements".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = find_elements(session, "css selector", "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = find_elements(session, "css selector", "foo")
+ assert_error(response, "no such window")
+
+
+@pytest.mark.parametrize(
+ "value",
+ ["#same1", "#frame", "#shadow"],
+ ids=["not-existent", "existent-other-frame", "existent-shadow-dom"],
+)
+def test_no_such_element_with_invalid_value(session, iframe, inline, value):
+ session.url = inline(f"""
+ <style>
+ custom-checkbox-element {{
+ display:block; width:20px; height:20px;
+ }}
+ </style>
+ <script>
+ customElements.define('custom-checkbox-element',
+ class extends HTMLElement {{
+ constructor() {{
+ super();
+ this.attachShadow({{mode: 'open'}}).innerHTML = `
+ <div id="shadow"><input type="checkbox"/></div>
+ `;
+ }}
+ }});
+ </script>
+ <div id="top">
+ <div id="same"/>
+ {iframe("<div id='frame'>")}
+ <custom-checkbox-element id='checkbox'></custom-checkbox-element>
+ </div>""")
+
+ response = find_elements(session, "css selector", value)
+ assert_success(response)
+ assert response.body["value"] == []
+
+
+@pytest.mark.parametrize("using", ["a", True, None, 1, [], {}])
+def test_invalid_using_argument(session, using):
+ response = find_elements(session, using, "value")
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("value", [None, [], {}])
+def test_invalid_selector_argument(session, value):
+ response = find_elements(session, "css selector", value)
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("using,value",
+ [("css selector", "#linkText"),
+ ("link text", "full link text"),
+ ("partial link text", "link text"),
+ ("tag name", "a"),
+ ("xpath", "//a")])
+def test_find_elements(session, inline, using, value):
+ session.url = inline("<a href=# id=linkText>full link text</a>")
+
+ response = find_elements(session, using, value)
+ assert_success(response)
+ assert len(response.body["value"]) == 1
+
+
+@pytest.mark.parametrize("document,value", [
+ ("<a href=#>link text</a>", "link text"),
+ ("<a href=#>&nbsp;link text&nbsp;</a>", "link text"),
+ ("<a href=#>link<br>text</a>", "link\ntext"),
+ ("<a href=#>link&amp;text</a>", "link&text"),
+ ("<a href=#>LINK TEXT</a>", "LINK TEXT"),
+ ("<a href=# style='text-transform: uppercase'>link text</a>", "LINK TEXT"),
+])
+def test_find_elements_link_text(session, inline, document, value):
+ session.url = inline("<a href=#>not wanted</a><br/>{0}".format(document))
+ expected = session.execute_script("return document.links[1];")
+
+ response = find_elements(session, "link text", value)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
+
+ found_element = value[0]
+ assert_same_element(session, found_element, expected)
+
+
+@pytest.mark.parametrize("document,value", [
+ ("<a href=#>partial link text</a>", "link"),
+ ("<a href=#>&nbsp;partial link text&nbsp;</a>", "link"),
+ ("<a href=#>partial link text</a>", "k t"),
+ ("<a href=#>partial link<br>text</a>", "k\nt"),
+ ("<a href=#>partial link&amp;text</a>", "k&t"),
+ ("<a href=#>PARTIAL LINK TEXT</a>", "LINK"),
+ ("<a href=# style='text-transform: uppercase'>partial link text</a>", "LINK"),
+])
+def test_find_elements_partial_link_text(session, inline, document, value):
+ session.url = inline("<a href=#>not wanted</a><br/>{0}".format(document))
+ expected = session.execute_script("return document.links[1];")
+
+ response = find_elements(session, "partial link text", value)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
+
+ found_element = value[0]
+ assert_same_element(session, found_element, expected)
+
+
+@pytest.mark.parametrize("using,value",
+ [("css selector", "#linkText"),
+ ("link text", "full link text"),
+ ("partial link text", "link text"),
+ ("tag name", "a"),
+ ("xpath", "//*[name()='a']")])
+def test_xhtml_namespace(session, inline, using, value):
+ session.url = inline("""<a href="#" id="linkText">full link text</a>""",
+ doctype="xhtml")
+ expected = session.execute_script("return document.links[0];")
+
+ response = find_elements(session, using, value)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
+
+ found_element = value[0]
+ assert_same_element(session, found_element, expected)
+
+
+@pytest.mark.parametrize("using,value",
+ [("css selector", ":root"),
+ ("tag name", "html"),
+ ("xpath", "/html")])
+def test_htmldocument(session, inline, using, value):
+ session.url = inline("")
+ response = find_elements(session, using, value)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements/user_prompts.py b/testing/web-platform/tests/webdriver/tests/find_elements/user_prompts.py
new file mode 100644
index 0000000000..f9a45e5275
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements/user_prompts.py
@@ -0,0 +1,122 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import (
+ assert_error,
+ assert_same_element,
+ assert_success,
+ assert_dialog_handled,
+)
+
+
+def find_elements(session, using, value):
+ return session.transport.send(
+ "POST", "session/{session_id}/elements".format(**vars(session)),
+ {"using": using, "value": value})
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<p>bar</p>")
+ element = session.find.css("p", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = find_elements(session, "css selector", "p")
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert_same_element(session, value[0], element)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<p>bar</p>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = find_elements(session, "css selector", "p")
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<p>bar</p>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = find_elements(session, "css selector", "p")
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements_from_element/__init__.py b/testing/web-platform/tests/webdriver/tests/find_elements_from_element/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements_from_element/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements_from_element/find.py b/testing/web-platform/tests/webdriver/tests/find_elements_from_element/find.py
new file mode 100644
index 0000000000..0e3bf2fd11
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements_from_element/find.py
@@ -0,0 +1,210 @@
+import pytest
+
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+
+
+def find_elements(session, element_id, using, value):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/elements".format(
+ session_id=session.session_id,
+ element_id=element_id),
+ {"using": using, "value": value})
+
+
+def test_null_parameter_value(session, http, inline):
+ session.url = inline("<div><a href=# id=linkText>full link text</a></div>")
+ element = session.find.css("div", all=False)
+
+ path = "/session/{session_id}/element/{element_id}/elements".format(
+ session_id=session.session_id, element_id=element.id)
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = find_elements(session, "notReal", "css selector", "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = find_elements(session, "notReal", "css selector", "foo")
+ assert_error(response, "no such window")
+
+
+@pytest.mark.parametrize(
+ "value",
+ ["#same1", "#frame", "#shadow"],
+ ids=["not-existent", "existent-other-frame", "existent-shadow-dom"],
+)
+def test_no_such_element_with_invalid_value(session, iframe, inline, value):
+ session.url = inline(f"""
+ <style>
+ custom-checkbox-element {{
+ display:block; width:20px; height:20px;
+ }}
+ </style>
+ <script>
+ customElements.define('custom-checkbox-element',
+ class extends HTMLElement {{
+ constructor() {{
+ super();
+ this.attachShadow({{mode: 'open'}}).innerHTML = `
+ <div id="shadow"><input type="checkbox"/></div>
+ `;
+ }}
+ }});
+ </script>
+ <div id="top">
+ <div id="same"/>
+ {iframe("<div id='frame'>")}
+ <custom-checkbox-element id='checkbox'></custom-checkbox-element>
+ </div>""")
+
+ element = session.find.css("#top", all=False)
+ response = find_elements(session, element.id, "css selector", value)
+ assert response.body["value"] == []
+
+
+def test_no_such_element_with_startnode_from_other_window_handle(session, inline):
+ session.url = inline("<div id='parent'><p/>")
+ from_element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+ session.window_handle = new_handle
+
+ response = find_elements(session, from_element.id, "css selector", "p")
+ assert_error(response, "no such element")
+
+
+def test_no_such_element_with_startnode_from_other_frame(session, iframe, inline):
+ session.url = inline(iframe("<div id='parent'><p/>"))
+
+ session.switch_frame(0)
+ from_element = session.find.css("#parent", all=False)
+ session.switch_frame("parent")
+
+ response = find_elements(session, from_element.id, "css selector", "p")
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<div><p>foo</p></div>", "div", as_frame=as_frame)
+
+ response = find_elements(session, element.id, "css selector", "p")
+ assert_error(response, "stale element reference")
+
+
+@pytest.mark.parametrize("using", [("a"), (True), (None), (1), ([]), ({})])
+def test_invalid_using_argument(session, using):
+ response = find_elements(session, "notReal", using, "value")
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("value", [None, [], {}])
+def test_invalid_selector_argument(session, value):
+ response = find_elements(session, "notReal", "css selector", value)
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("using,value",
+ [("css selector", "#linkText"),
+ ("link text", "full link text"),
+ ("partial link text", "link text"),
+ ("tag name", "a"),
+ ("xpath", "//a")])
+def test_find_elements(session, inline, using, value):
+ session.url = inline("<div><a href=# id=linkText>full link text</a></div>")
+ element = session.find.css("div", all=False)
+ response = find_elements(session, element.id, using, value)
+ assert_success(response)
+
+
+@pytest.mark.parametrize("document,value", [
+ ("<a href=#>link text</a>", "link text"),
+ ("<a href=#>&nbsp;link text&nbsp;</a>", "link text"),
+ ("<a href=#>link<br>text</a>", "link\ntext"),
+ ("<a href=#>link&amp;text</a>", "link&text"),
+ ("<a href=#>LINK TEXT</a>", "LINK TEXT"),
+ ("<a href=# style='text-transform: uppercase'>link text</a>", "LINK TEXT"),
+])
+def test_find_elements_link_text(session, inline, document, value):
+ session.url = inline("<div><a href=#>not wanted</a><br/>{0}</div>".format(document))
+ element = session.find.css("div", all=False)
+ expected = session.execute_script("return document.links[1];")
+
+ response = find_elements(session, element.id, "link text", value)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
+
+ found_element = value[0]
+ assert_same_element(session, found_element, expected)
+
+
+@pytest.mark.parametrize("document,value", [
+ ("<a href=#>partial link text</a>", "link"),
+ ("<a href=#>&nbsp;partial link text&nbsp;</a>", "link"),
+ ("<a href=#>partial link text</a>", "k t"),
+ ("<a href=#>partial link<br>text</a>", "k\nt"),
+ ("<a href=#>partial link&amp;text</a>", "k&t"),
+ ("<a href=#>PARTIAL LINK TEXT</a>", "LINK"),
+ ("<a href=# style='text-transform: uppercase'>partial link text</a>", "LINK"),
+])
+def test_find_elements_partial_link_text(session, inline, document, value):
+ session.url = inline("<div><a href=#>not wanted</a><br/>{0}</div>".format(document))
+ element = session.find.css("div", all=False)
+ expected = session.execute_script("return document.links[1];")
+
+ response = find_elements(session, element.id, "partial link text", value)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
+
+ found_element = value[0]
+ assert_same_element(session, found_element, expected)
+
+
+@pytest.mark.parametrize("using,value",
+ [("css selector", "#linkText"),
+ ("link text", "full link text"),
+ ("partial link text", "link text"),
+ ("tag name", "a"),
+ ("xpath", "//*[name()='a']")])
+def test_xhtml_namespace(session, inline, using, value):
+ session.url = inline("""<p><a href="#" id="linkText">full link text</a></p>""",
+ doctype="xhtml")
+ from_element = session.execute_script("""return document.querySelector("p")""")
+ expected = session.execute_script("return document.links[0]")
+
+ response = find_elements(session, from_element.id, using, value)
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
+
+ found_element = value[0]
+ assert_same_element(session, found_element, expected)
+
+
+def test_parent_htmldocument(session, inline):
+ session.url = inline("")
+ from_element = session.execute_script("""return document.querySelector("body")""")
+ expected = session.execute_script("return document.documentElement")
+
+ response = find_elements(session, from_element.id, "xpath", "..")
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
+
+ found_element = value[0]
+ assert_same_element(session, found_element, expected)
+
+
+def test_parent_of_document_node_errors(session, inline):
+ session.url = inline("")
+ from_element = session.execute_script("return document.documentElement")
+
+ response = find_elements(session, from_element.id, "xpath", "..")
+ assert_error(response, "invalid selector")
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements_from_element/user_prompts.py b/testing/web-platform/tests/webdriver/tests/find_elements_from_element/user_prompts.py
new file mode 100644
index 0000000000..467bec09a1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements_from_element/user_prompts.py
@@ -0,0 +1,127 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import (
+ assert_error,
+ assert_same_element,
+ assert_success,
+ assert_dialog_handled,
+)
+
+
+def find_elements(session, element_id, using, value):
+ return session.transport.send(
+ "POST", "session/{session_id}/element/{element_id}/elements".format(
+ session_id=session.session_id,
+ element_id=element_id),
+ {"using": using, "value": value})
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+        session.url = inline("<div><p>bar</p></div>")
+ outer_element = session.find.css("div", all=False)
+ inner_element = session.find.css("p", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = find_elements(session, outer_element.id, "css selector", "p")
+ value = assert_success(response)
+ assert isinstance(value, list)
+ assert len(value) == 1
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert_same_element(session, value[0], inner_element)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+        session.url = inline("<div><p>bar</p></div>")
+ outer_element = session.find.css("div", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = find_elements(session, outer_element.id, "css selector", "p")
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+        session.url = inline("<div><p>bar</p></div>")
+ outer_element = session.find.css("div", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = find_elements(session, outer_element.id, "css selector", "p")
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/__init__.py b/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/conftest.py b/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/conftest.py
new file mode 100644
index 0000000000..5fd5f8a065
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/conftest.py
@@ -0,0 +1,19 @@
+import pytest
+
+@pytest.fixture
+def get_shadow_page():
+ def get_shadow_page(shadow_content):
+ return """
+ <custom-shadow-element></custom-shadow-element>
+ <script>
+ customElements.define('custom-shadow-element',
+ class extends HTMLElement {{
+ constructor() {{
+ super();
+ this.attachShadow({{mode: 'open'}}).innerHTML = `
+                    {0}
+ `;
+ }}
+ }});
+ </script>""".format(shadow_content)
+ return get_shadow_page
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/find.py b/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/find.py
new file mode 100644
index 0000000000..c21f081125
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/find.py
@@ -0,0 +1,203 @@
+import pytest
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+
+
+def find_elements(session, shadow_id, using, value):
+    """Issue the raw "Find Elements From Shadow Root" command
+    (POST /session/{id}/shadow/{shadow_id}/elements)."""
+    return session.transport.send(
+        "POST", "session/{session_id}/shadow/{shadow_id}/elements".format(
+            session_id=session.session_id,
+            shadow_id=shadow_id),
+        {"using": using, "value": value})
+
+
+def test_null_parameter_value(session, http, get_shadow_page):
+    # A null (absent) request body must be rejected with "invalid argument".
+    session.url = get_shadow_page("<div><a href=# id=linkText>full link text</a></div>")
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+
+    # Bypass the typed helper and POST a literal null body.
+    path = "/session/{session_id}/shadow/{shadow_id}/elements".format(
+        session_id=session.session_id, shadow_id=shadow_root.id)
+    with http.post(path, None) as response:
+        assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_no_top_browsing_context(session, closed_window):
+    # With the top-level browsing context closed the command must fail
+    # before the shadow-root id is even looked at.
+    response = find_elements(session, "notReal", "css selector", "foo")
+    assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+    # A closed child frame (current browsing context) also yields
+    # "no such window" for this element-retrieval command.
+    response = find_elements(session, "notReal", "css selector", "foo")
+    assert_error(response, "no such window")
+
+
+def test_no_such_element_with_unknown_shadow_root(session, inline, get_shadow_page):
+    # Navigate away after grabbing the shadow root; the stale reference
+    # must produce "no such element" rather than finding anything.
+    session.url = inline(get_shadow_page("<div><input type='checkbox'/></div>"))
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+
+    session.url = inline("<p>")
+
+    result = find_elements(session, shadow_root.id, "css selector", "input")
+    assert_error(result, "no such element")
+
+
+@pytest.mark.parametrize(
+    "value",
+    ["#doesNotExist", "#inner"],
+    ids=["not-existent", "existent-inner-shadow-dom"],
+)
+def test_no_such_element_with_invalid_value(
+    session, iframe, inline, get_shadow_page, value
+):
+    # "#inner" exists only inside a *nested* shadow root; the search must
+    # not pierce into it, so both selectors fail with "no such element".
+    session.url = inline(get_shadow_page(f"""
+        <div id="outer"/>
+        {get_shadow_page("<div id='inner'>")}
+    """))
+
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+
+    response = find_elements(session, shadow_root.id, "css selector", value)
+    assert_error(response, "no such element")
+
+
+def test_no_such_element_with_shadow_root_from_other_window_handle(
+    session, inline, get_shadow_page
+):
+    # A shadow-root reference is scoped to its window; using it from a
+    # different window handle must yield "no such element".
+    session.url = inline(get_shadow_page("<div>"))
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+
+    new_handle = session.new_window()
+    session.window_handle = new_handle
+
+    response = find_elements(session, shadow_root.id, "css selector", "div")
+    assert_error(response, "no such element")
+
+
+def test_no_such_element_with_shadow_root_from_other_frame(
+    session, iframe, inline, get_shadow_page
+):
+    # The shadow root lives in the child frame; after switching back to
+    # the parent context its reference must not resolve.
+    session.url = inline(iframe(get_shadow_page("<div>")))
+
+    session.switch_frame(0)
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+    session.switch_frame("parent")
+
+    response = find_elements(session, shadow_root.id, "css selector", "div")
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_detached_shadow_root(session, iframe, inline, get_shadow_page, as_frame):
+    # Removing the host element detaches its shadow root; the command
+    # must report "detached shadow root" in both top and child contexts.
+    page = get_shadow_page("<div><input type='checkbox'/></div>")
+
+    if as_frame:
+        session.url = inline(iframe(page))
+        frame = session.find.css("iframe", all=False)
+        session.switch_frame(frame)
+    else:
+        session.url = inline(page)
+
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+
+    session.execute_script("arguments[0].remove();", args=[custom_element])
+
+    response = find_elements(session, shadow_root.id, "css selector", "input")
+    assert_error(response, "detached shadow root")
+
+
+# Note: the parenthesized params are plain values, not tuples; "a" is
+# simply not a valid location strategy.
+@pytest.mark.parametrize("using", [("a"), (True), (None), (1), ([]), ({})])
+def test_invalid_using_argument(session, using):
+    # Step 1 - 2
+    response = find_elements(session, "notReal", using, "value")
+    assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("value", [None, [], {}])
+def test_invalid_selector_argument(session, value):
+    # Step 3 - 4: non-string selector values must be rejected.
+    response = find_elements(session, "notReal", "css selector", value)
+    assert_error(response, "invalid argument")
+
+
+def test_find_elements_equivalence(session, inline, get_shadow_page):
+    # Smoke test: a selector matching multiple shadow children succeeds.
+    session.url = inline(get_shadow_page(
+        "<div><input id='check' type='checkbox'/><input id='text'/></div>"))
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+    response = find_elements(session, shadow_root.id, "css selector", "input")
+    assert_success(response)
+
+
+# Each supported location strategy must find the anchor in the shadow root.
+@pytest.mark.parametrize("using,value",
+                         [("css selector", "#linkText"),
+                          ("link text", "full link text"),
+                          ("partial link text", "link text"),
+                          ("tag name", "a"),
+                          ("xpath", "//a")])
+def test_find_elements(session, inline, get_shadow_page, using, value):
+    # Step 8 - 9
+    session.url = inline(get_shadow_page("<div><a href=# id=linkText>full link text</a></div>"))
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+    response = find_elements(session, shadow_root.id, using, value)
+    assert_success(response)
+
+
+# "link text" matching is on rendered text: whitespace is trimmed, <br>
+# becomes a newline, entities are decoded, and CSS text-transform applies.
+@pytest.mark.parametrize("document,value", [
+    ("<a href=#>link text</a>", "link text"),
+    ("<a href=#>&nbsp;link text&nbsp;</a>", "link text"),
+    ("<a href=#>link<br>text</a>", "link\ntext"),
+    ("<a href=#>link&amp;text</a>", "link&text"),
+    ("<a href=#>LINK TEXT</a>", "LINK TEXT"),
+    ("<a href=# style='text-transform: uppercase'>link text</a>", "LINK TEXT"),
+])
+def test_find_elements_link_text(session, inline, get_shadow_page, document, value):
+    # Step 8 - 9
+    session.url = inline(get_shadow_page(
+        "<div><a href=#>not wanted</a><br/>{0}</div>".format(document)))
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+    # Expected element is the second anchor; the first is a decoy.
+    expected = session.execute_script("return arguments[0].shadowRoot.querySelectorAll('a')[1]",
+                                      args=(custom_element,))
+
+    response = find_elements(session, shadow_root.id, "link text", value)
+    value = assert_success(response)
+    assert isinstance(value, list)
+    assert len(value) == 1
+
+    found_element = value[0]
+    assert_same_element(session, found_element, expected)
+
+
+# "partial link text" matches substrings of the rendered link text,
+# including substrings spanning a <br>-induced newline or decoded entity.
+@pytest.mark.parametrize("document,value", [
+    ("<a href=#>partial link text</a>", "link"),
+    ("<a href=#>&nbsp;partial link text&nbsp;</a>", "link"),
+    ("<a href=#>partial link text</a>", "k t"),
+    ("<a href=#>partial link<br>text</a>", "k\nt"),
+    ("<a href=#>partial link&amp;text</a>", "k&t"),
+    ("<a href=#>PARTIAL LINK TEXT</a>", "LINK"),
+    ("<a href=# style='text-transform: uppercase'>partial link text</a>", "LINK"),
+])
+def test_find_elements_partial_link_text(session, inline, get_shadow_page, document, value):
+    # Step 8 - 9
+    session.url = inline(get_shadow_page(
+        "<div><a href=#>not wanted</a><br/>{0}</div>".format(document)))
+    custom_element = session.find.css("custom-shadow-element", all=False)
+    shadow_root = custom_element.shadow_root
+    # Expected element is the second anchor; the first is a decoy.
+    expected = session.execute_script("return arguments[0].shadowRoot.querySelectorAll('a')[1]",
+                                      args=(custom_element,))
+
+    response = find_elements(session, shadow_root.id, "partial link text", value)
+    value = assert_success(response)
+    assert isinstance(value, list)
+    assert len(value) == 1
+
+    found_element = value[0]
+    assert_same_element(session, found_element, expected)
diff --git a/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/user_prompts.py b/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/user_prompts.py
new file mode 100644
index 0000000000..8ec381b387
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/find_elements_from_shadow_root/user_prompts.py
@@ -0,0 +1,131 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import (
+ assert_error,
+ assert_same_element,
+ assert_success,
+ assert_dialog_handled,
+)
+
+
+def find_elements(session, shadow_id, using, value):
+    """Raw "Find Elements From Shadow Root" command (duplicated from
+    find.py so this module stays self-contained)."""
+    return session.transport.send(
+        "POST", "session/{session_id}/shadow/{shadow_id}/elements".format(
+            session_id=session.session_id,
+            shadow_id=shadow_id),
+        {"using": using, "value": value})
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline, get_shadow_page):
+    # For "accept"/"dismiss" behaviors: prompt is auto-closed and the
+    # command succeeds, returning the shadow child element.
+    def check_user_prompt_closed_without_exception(dialog_type, retval):
+        # NOTE(review): trailing "<div>" looks like a typo for "</div>";
+        # the HTML parser recovers, so the test still works -- confirm.
+        session.url = inline(get_shadow_page("<div><p>bar</p><div>"))
+        outer_element = session.find.css("custom-shadow-element", all=False)
+        shadow_root = outer_element.shadow_root
+        inner_element = session.execute_script("return arguments[0].shadowRoot.querySelector('p')",
+                                               args=(outer_element,))
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = find_elements(session, shadow_root.id, "css selector", "p")
+        value = assert_success(response)
+        assert isinstance(value, list)
+        assert len(value) == 1
+
+        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+        assert_same_element(session, value[0], inner_element)
+
+    return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline, get_shadow_page):
+    # For "... and notify" behaviors: prompt is auto-closed but the
+    # command itself fails with "unexpected alert open".
+    def check_user_prompt_closed_with_exception(dialog_type, retval):
+        # NOTE(review): trailing "<div>" looks like a typo for "</div>" --
+        # harmless after parser recovery, but confirm.
+        session.url = inline(get_shadow_page("<div><p>bar</p><div>"))
+        outer_element = session.find.css("custom-shadow-element", all=False)
+        shadow_root = outer_element.shadow_root
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = find_elements(session, shadow_root.id, "css selector", "p")
+        assert_error(response, "unexpected alert open")
+
+        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+    return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline, get_shadow_page):
+    # For "ignore": prompt stays open, command errors; the test dismisses
+    # the prompt itself to leave the session clean.
+    def check_user_prompt_not_closed_but_exception(dialog_type):
+        # NOTE(review): trailing "<div>" looks like a typo for "</div>" --
+        # harmless after parser recovery, but confirm.
+        session.url = inline(get_shadow_page("<div><p>bar</p><div>"))
+        outer_element = session.find.css("custom-shadow-element", all=False)
+        shadow_root = outer_element.shadow_root
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = find_elements(session, shadow_root.id, "css selector", "p")
+        assert_error(response, "unexpected alert open")
+
+        # Prompt must still be present and showing the expected text.
+        assert session.alert.text == dialog_type
+        session.alert.dismiss()
+
+    return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/forward/__init__.py b/testing/web-platform/tests/webdriver/tests/forward/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/forward/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/forward/conftest.py b/testing/web-platform/tests/webdriver/tests/forward/conftest.py
new file mode 100644
index 0000000000..bd5db0cfeb
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/forward/conftest.py
@@ -0,0 +1,19 @@
+import pytest
+
+from webdriver.error import NoSuchWindowException
+
+
+@pytest.fixture(name="session")
+def fixture_session(capabilities, session):
+    """Prevent re-using existent history by running the test in a new window."""
+    # NOTE(review): "capabilities" appears unused in the body -- presumably
+    # requested to force fixture instantiation order; confirm.
+    original_handle = session.window_handle
+    session.window_handle = session.new_window()
+
+    yield session
+
+    # The test may have closed the window itself already.
+    try:
+        session.window.close()
+    except NoSuchWindowException:
+        pass
+
+    session.window_handle = original_handle
diff --git a/testing/web-platform/tests/webdriver/tests/forward/forward.py b/testing/web-platform/tests/webdriver/tests/forward/forward.py
new file mode 100644
index 0000000000..f27be403f9
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/forward/forward.py
@@ -0,0 +1,195 @@
+import pytest
+from webdriver import error
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def forward(session):
+    """Issue the raw "Forward" command (POST /session/{id}/forward)."""
+    return session.transport.send(
+        "POST", "session/{session_id}/forward".format(**vars(session)))
+
+
+def test_null_response_value(session, inline):
+    # A successful Forward must return a null value.
+    session.url = inline("<div>")
+    session.url = inline("<p>")
+    session.back()
+
+    response = forward(session)
+    value = assert_success(response)
+    assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = forward(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+    # Unlike element commands, Forward operates on the top-level context,
+    # so a closed child frame does not make it fail.
+    response = forward(session)
+    assert_success(response)
+
+
+def test_basic(session, inline):
+    # After Forward, references from the previous document go stale and
+    # the newer history entry is restored.
+    url = inline("<div id=foo>")
+
+    session.url = inline("<div id=bar>")
+    session.url = url
+    session.back()
+
+    element = session.find.css("#bar", all=False)
+
+    response = forward(session)
+    assert_success(response)
+
+    # Element from the page navigated away from must be stale.
+    with pytest.raises(error.StaleElementReferenceException):
+        element.property("id")
+
+    assert session.url == url
+    assert session.find.css("#foo", all=False)
+
+
+def test_no_browsing_history(session, inline):
+    # Forward with no forward history is a no-op: same URL, element
+    # reference remains valid.
+    url = inline("<div id=foo>")
+
+    session.url = url
+    element = session.find.css("#foo", all=False)
+
+    response = forward(session)
+    assert_success(response)
+
+    assert session.url == url
+    assert element.property("id") == "foo"
+
+
+def test_data_urls(session, inline):
+    # Forward must traverse between inline (data-URL-style) documents.
+    test_pages = [
+        inline("<p id=1>"),
+        inline("<p id=2>"),
+    ]
+
+    for page in test_pages:
+        session.url = page
+
+    session.back()
+    assert session.url == test_pages[0]
+
+    response = forward(session)
+    assert_success(response)
+    assert session.url == test_pages[1]
+
+
+def test_dismissed_beforeunload(session, inline):
+    # A beforeunload prompt (armed by typing into the input) must not
+    # block Forward; navigation proceeds without user interaction.
+    url_beforeunload = inline("""
+      <input type="text">
+      <script>
+        window.addEventListener("beforeunload", function (event) {
+          event.preventDefault();
+        });
+      </script>
+    """)
+
+    session.url = url_beforeunload
+    session.url = inline("<div id=foo>")
+    session.back()
+
+    # Interact with the page so the beforeunload handler is actually armed.
+    element = session.find.css("input", all=False)
+    element.send_keys("bar")
+
+    response = forward(session)
+    assert_success(response)
+
+    assert session.url != url_beforeunload
+
+
+def test_fragments(session, url):
+    # Fragment-only navigations create history entries that Forward must
+    # step through one at a time.
+    test_pages = [
+        url("/common/blank.html"),
+        url("/common/blank.html#1234"),
+        url("/common/blank.html#5678"),
+    ]
+
+    for page in test_pages:
+        session.url = page
+
+    session.back()
+    assert session.url == test_pages[1]
+
+    session.back()
+    assert session.url == test_pages[0]
+
+    response = forward(session)
+    assert_success(response)
+    assert session.url == test_pages[1]
+
+    response = forward(session)
+    assert_success(response)
+    assert session.url == test_pages[2]
+
+
+def test_history_pushstate(session, inline):
+    # Forward must restore both the URL and the history.state object of a
+    # pushState entry.
+    pushstate_page = inline("""
+      <script>
+        function pushState() {
+          history.pushState({foo: "bar"}, "", "#pushstate");
+        }
+      </script>
+      <a onclick="javascript:pushState();">click</a>
+    """)
+
+    session.url = pushstate_page
+
+    session.find.css("a", all=False).click()
+    assert session.url == "{}#pushstate".format(pushstate_page)
+    assert session.execute_script("return history.state;") == {"foo": "bar"}
+
+    session.back()
+    assert session.url == pushstate_page
+    assert session.execute_script("return history.state;") is None
+
+    response = forward(session)
+    assert_success(response)
+
+    assert session.url == "{}#pushstate".format(pushstate_page)
+    assert session.execute_script("return history.state;") == {"foo": "bar"}
+
+
+def test_removed_iframe(session, url, inline):
+    # Forward must succeed even when the currently selected browsing
+    # context is a frame that the history traversal destroys
+    # (frames_no_bfcache.html prevents back/forward-cache restoration).
+    page = inline("<p>foo")
+
+    session.url = url("/webdriver/tests/support/html/frames_no_bfcache.html")
+    session.url = page
+
+    session.back()
+
+    subframe = session.find.css("#sub-frame", all=False)
+    session.switch_frame(subframe)
+
+    response = forward(session)
+    assert_success(response)
+
+    assert session.url == page
+
+
+# Capability needed as long as no valid certificate is available:
+# https://github.com/web-platform-tests/wpt/issues/28847
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+def test_cross_origin(session, url):
+    # Cross-origin forward (COOP: same-origin forces a browsing-context
+    # group swap) must invalidate element references from the old page.
+    # NOTE(review): the pipe header value is missing its closing ")" --
+    # looks like a typo in the wptserve pipe syntax; confirm whether the
+    # header is still applied as intended.
+    base_path = ("/webdriver/tests/support/html/subframe.html" +
+                 "?pipe=header(Cross-Origin-Opener-Policy,same-origin")
+    first_page = url(base_path, protocol="https")
+    second_page = url(base_path, protocol="https", domain="alt")
+
+    session.url = first_page
+    session.url = second_page
+    session.back()
+
+    elem = session.find.css("#delete", all=False)
+
+    response = forward(session)
+    assert_success(response)
+
+    assert session.url == second_page
+
+    # Old reference must be unusable; a fresh lookup must succeed.
+    with pytest.raises(error.NoSuchElementException):
+        elem.click()
+    elem = session.find.css("#delete", all=False)
diff --git a/testing/web-platform/tests/webdriver/tests/forward/user_prompts.py b/testing/web-platform/tests/webdriver/tests/forward/user_prompts.py
new file mode 100644
index 0000000000..3eeaf6e71c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/forward/user_prompts.py
@@ -0,0 +1,121 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def forward(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/forward".format(**vars(session)))
+
+
+@pytest.fixture
+def pages(session, inline):
+    # Build a two-entry history and navigate back once, so a forward
+    # entry (pages[1]) is available to each test.
+    pages = [
+        inline("<p id=1>"),
+        inline("<p id=2>"),
+    ]
+
+    for page in pages:
+        session.url = page
+
+    session.back()
+
+    return pages
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, pages):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = forward(session)
+ assert_success(response)
+
+ # retval not testable for confirm and prompt because window is gone
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=None)
+
+ assert session.url == pages[1]
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, pages):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = forward(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.url == pages[0]
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, pages):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = forward(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.url == pages[0]
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type):
+ # retval not testable for confirm and prompt because window is gone
+ check_user_prompt_closed_without_exception(dialog_type, None)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
+ # retval not testable for confirm and prompt because window is gone
+ check_user_prompt_closed_without_exception(dialog_type, None)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/fullscreen_window/__init__.py b/testing/web-platform/tests/webdriver/tests/fullscreen_window/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/fullscreen_window/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/fullscreen_window/fullscreen.py b/testing/web-platform/tests/webdriver/tests/fullscreen_window/fullscreen.py
new file mode 100644
index 0000000000..94f25ed9bd
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/fullscreen_window/fullscreen.py
@@ -0,0 +1,53 @@
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import is_fullscreen
+
+
+def fullscreen(session):
+    """Issue the raw "Fullscreen Window" command
+    (POST /session/{id}/window/fullscreen)."""
+    return session.transport.send(
+        "POST", "session/{session_id}/window/fullscreen".format(**vars(session)))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = fullscreen(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = fullscreen(session)
+ assert_success(response)
+
+
+def test_fullscreen(session):
+ response = fullscreen(session)
+ assert_success(response)
+
+ assert is_fullscreen(session)
+
+
+def test_payload(session):
+    # The response value must be a window rect: integer width/height/x/y.
+    response = fullscreen(session)
+
+    assert response.status == 200
+    assert isinstance(response.body["value"], dict)
+
+    value = response.body["value"]
+    assert "width" in value
+    assert "height" in value
+    assert "x" in value
+    assert "y" in value
+    assert isinstance(value["width"], int)
+    assert isinstance(value["height"], int)
+    assert isinstance(value["x"], int)
+    assert isinstance(value["y"], int)
+
+
+def test_fullscreen_twice_is_idempotent(session):
+    # Fullscreening an already-fullscreen window must succeed and keep
+    # the window fullscreen.
+    assert not is_fullscreen(session)
+
+    first_response = fullscreen(session)
+    assert_success(first_response)
+    assert is_fullscreen(session)
+
+    second_response = fullscreen(session)
+    assert_success(second_response)
+    assert is_fullscreen(session)
diff --git a/testing/web-platform/tests/webdriver/tests/fullscreen_window/stress.py b/testing/web-platform/tests/webdriver/tests/fullscreen_window/stress.py
new file mode 100644
index 0000000000..b907a31f17
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/fullscreen_window/stress.py
@@ -0,0 +1,19 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_success
+from tests.support.helpers import is_fullscreen
+
+
+def fullscreen_window(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/window/fullscreen".format(**vars(session)))
+
+
+# Repeat the fullscreen transition several times (fresh session each run,
+# so the window must start non-fullscreen every iteration).
+@pytest.mark.parametrize("i", range(5))
+def test_stress(session, i):
+    assert not is_fullscreen(session)
+    response = fullscreen_window(session)
+    assert_success(response)
+    assert is_fullscreen(session)
diff --git a/testing/web-platform/tests/webdriver/tests/fullscreen_window/user_prompts.py b/testing/web-platform/tests/webdriver/tests/fullscreen_window/user_prompts.py
new file mode 100644
index 0000000000..106bc457f0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/fullscreen_window/user_prompts.py
@@ -0,0 +1,116 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+from tests.support.helpers import is_fullscreen
+
+
+def fullscreen(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/window/fullscreen".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ assert not is_fullscreen(session)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = fullscreen(session)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+ assert is_fullscreen(session)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ assert not is_fullscreen(session)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = fullscreen(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+ assert not is_fullscreen(session)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ assert not is_fullscreen(session)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = fullscreen(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert not is_fullscreen(session)
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_active_element/__init__.py b/testing/web-platform/tests/webdriver/tests/get_active_element/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_active_element/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_active_element/get.py b/testing/web-platform/tests/webdriver/tests/get_active_element/get.py
new file mode 100644
index 0000000000..2b79ebd584
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_active_element/get.py
@@ -0,0 +1,132 @@
+from tests.support.asserts import assert_error, assert_is_active_element, assert_success
+
+
+def read_global(session, name):
+ return session.execute_script("return %s;" % name)
+
+
+def get_active_element(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/element/active".format(**vars(session)))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = get_active_element(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = get_active_element(session)
+ assert_error(response, "no such window")
+
+
+def test_no_such_element(session, inline):
+    # With the body element removed there is no active element to return.
+    session.url = inline("<body></body>")
+    # removeNode() is the fallback for engines without Element.remove().
+    session.execute_script("""
+        if (document.body.remove) {
+          document.body.remove();
+        } else {
+          document.body.removeNode(true);
+        }""")
+
+    response = get_active_element(session)
+    assert_error(response, "no such element")
+
+
+def test_success_document(session, inline):
+ session.url = inline("""
+ <body>
+ <h1>Heading</h1>
+ <input />
+ <input />
+ <input style="opacity: 0" />
+ <p>Another element</p>
+ </body>""")
+
+ response = get_active_element(session)
+ element = assert_success(response)
+ assert_is_active_element(session, element)
+
+
+def test_success_input(session, inline):
+ session.url = inline("""
+ <body>
+ <h1>Heading</h1>
+ <input autofocus />
+ <input style="opacity: 0" />
+ <p>Another element</p>
+ </body>""")
+
+ response = get_active_element(session)
+ element = assert_success(response)
+ assert_is_active_element(session, element)
+
+
+def test_success_input_non_interactable(session, inline):
+ session.url = inline("""
+ <body>
+ <h1>Heading</h1>
+ <input />
+ <input style="opacity: 0" autofocus />
+ <p>Another element</p>
+ </body>""")
+
+ response = get_active_element(session)
+ element = assert_success(response)
+ assert_is_active_element(session, element)
+
+
+def test_success_explicit_focus(session, inline):
+    # The active element must track each explicit focus() call, survive
+    # removal of the focused iframe, and follow a newly appended textarea.
+    session.url = inline("""
+        <body>
+            <h1>Heading</h1>
+            <input />
+            <iframe></iframe>
+        </body>""")
+
+    session.execute_script("document.body.getElementsByTagName('h1')[0].focus()")
+    response = get_active_element(session)
+    element = assert_success(response)
+    assert_is_active_element(session, element)
+
+    session.execute_script("document.body.getElementsByTagName('input')[0].focus()")
+    response = get_active_element(session)
+    element = assert_success(response)
+    assert_is_active_element(session, element)
+
+    session.execute_script("document.body.getElementsByTagName('iframe')[0].focus()")
+    response = get_active_element(session)
+    element = assert_success(response)
+    assert_is_active_element(session, element)
+
+    # Removing the focused iframe must fall back to another active element.
+    session.execute_script("document.body.getElementsByTagName('iframe')[0].focus();")
+    session.execute_script("""
+        var iframe = document.body.getElementsByTagName('iframe')[0];
+        if (iframe.remove) {
+          iframe.remove();
+        } else {
+          iframe.removeNode(true);
+        }""")
+    response = get_active_element(session)
+    element = assert_success(response)
+    assert_is_active_element(session, element)
+
+    session.execute_script("document.body.appendChild(document.createElement('textarea'))")
+    response = get_active_element(session)
+    element = assert_success(response)
+    assert_is_active_element(session, element)
+
+
+def test_success_iframe_content(session, inline):
+    # Focus an input inside a dynamically created iframe; the command is
+    # issued against the top-level context and must still succeed.
+    session.url = inline("<body></body>")
+    session.execute_script("""
+        let iframe = document.createElement('iframe');
+        document.body.appendChild(iframe);
+        let input = iframe.contentDocument.createElement('input');
+        iframe.contentDocument.body.appendChild(input);
+        input.focus();
+        """)
+
+    response = get_active_element(session)
+    element = assert_success(response)
+    assert_is_active_element(session, element)
diff --git a/testing/web-platform/tests/webdriver/tests/get_active_element/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_active_element/user_prompts.py
new file mode 100644
index 0000000000..1ff77697b7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_active_element/user_prompts.py
@@ -0,0 +1,118 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import (
+ assert_dialog_handled,
+ assert_error,
+ assert_is_active_element,
+ assert_success
+)
+
+
def get_active_element(session):
    """Issue the raw "Get Active Element" command and return the response."""
    # Same endpoint as the original, built with %-formatting instead of
    # str.format(**vars(session)).
    endpoint = "session/%s/element/active" % session.session_id
    return session.transport.send("GET", endpoint)
+
+
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, inline):
    """Fixture: the driver auto-closes the prompt and the command succeeds."""
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        session.url = inline("<input type=text>")

        create_dialog(dialog_type, text=dialog_type)

        response = get_active_element(session)
        element = assert_success(response)

        # The prompt must have been handled with the expected return value.
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        assert_is_active_element(session, element)

    return check_user_prompt_closed_without_exception
+
+
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, inline):
    """Fixture: prompt is auto-closed but the command errors ("... and notify")."""
    def check_user_prompt_closed_with_exception(dialog_type, retval):
        session.url = inline("<input type=text>")

        create_dialog(dialog_type, text=dialog_type)

        response = get_active_element(session)
        assert_error(response, "unexpected alert open")

        # Even though the command failed, the prompt was handled.
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

    return check_user_prompt_closed_with_exception
+
+
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
    """Fixture: prompt stays open ("ignore"), command errors; test cleans up."""
    def check_user_prompt_not_closed_but_exception(dialog_type):
        session.url = inline("<input type=text>")

        create_dialog(dialog_type, text=dialog_type)

        response = get_active_element(session)
        assert_error(response, "unexpected alert open")

        # The prompt must still be open; dismiss it so teardown succeeds.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

    return check_user_prompt_not_closed_but_exception
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "accept", prompts are auto-accepted and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "accept and notify", the prompt is accepted but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "dismiss", prompts are auto-dismissed and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "dismiss and notify", the prompt is dismissed but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    """With "ignore", the prompt stays open and the command errors."""
    check_user_prompt_not_closed_but_exception(dialog_type)
+
+
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    """Without a capability, the default is "dismiss and notify"."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_alert_text/__init__.py b/testing/web-platform/tests/webdriver/tests/get_alert_text/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_alert_text/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_alert_text/get.py b/testing/web-platform/tests/webdriver/tests/get_alert_text/get.py
new file mode 100644
index 0000000000..7ee7ff1808
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_alert_text/get.py
@@ -0,0 +1,70 @@
+from webdriver.error import NoSuchAlertException
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.sync import Poll
+
+
def get_alert_text(session):
    """Issue the raw "Get Alert Text" command and return the response."""
    endpoint = "session/%s/alert/text" % session.session_id
    return session.transport.send("GET", endpoint)
+
+
def test_no_top_browsing_context(session, closed_window):
    """With the top-level browsing context closed, "no such window" is raised."""
    response = get_alert_text(session)
    assert_error(response, "no such window")
+
+
def test_no_browsing_context(session, closed_frame):
    """A closed child frame alone does not block the command; with no prompt
    open the error is "no such alert"."""
    response = get_alert_text(session)
    assert_error(response, "no such alert")
+
+
def test_no_user_prompt(session):
    """Without any open user prompt the command fails with "no such alert"."""
    response = get_alert_text(session)
    assert_error(response, "no such alert")
+
+
def test_get_alert_text(session, inline):
    """The message of a window.alert() prompt is returned verbatim."""
    session.url = inline("<script>window.alert('Hello');</script>")

    response = get_alert_text(session)
    assert_success(response)

    # The payload is a JSON object whose "value" member carries the text.
    body = response.body
    assert isinstance(body, dict)
    assert "value" in body
    text = body["value"]
    assert isinstance(text, str)
    assert text == "Hello"
+
+
def test_get_confirm_text(session, inline):
    """The message of a window.confirm() prompt is returned verbatim."""
    session.url = inline("<script>window.confirm('Hello');</script>")

    response = get_alert_text(session)
    assert_success(response)

    # The payload is a JSON object whose "value" member carries the text.
    body = response.body
    assert isinstance(body, dict)
    assert "value" in body
    text = body["value"]
    assert isinstance(text, str)
    assert text == "Hello"
+
+
def test_get_prompt_text(session, inline):
    """For window.prompt() the message — not the default input — is returned."""
    session.url = inline("<script>window.prompt('Enter Your Name: ', 'Federer');</script>")

    response = get_alert_text(session)
    assert_success(response)

    # The payload is a JSON object whose "value" member carries the message.
    body = response.body
    assert isinstance(body, dict)
    assert "value" in body
    text = body["value"]
    assert isinstance(text, str)
    assert text == "Enter Your Name: "
+
+
def test_unexpected_alert(session):
    """An alert opened asynchronously (after a timeout) is still readable."""
    session.execute_script("setTimeout(function() { alert('Hello'); }, 100);")
    # Poll until the prompt actually appears; reading the text raises
    # NoSuchAlertException until then, which Poll ignores.
    wait = Poll(
        session,
        timeout=5,
        ignored_exceptions=NoSuchAlertException,
        message="No user prompt with text 'Hello' detected")
    wait.until(lambda s: s.alert.text == "Hello")

    response = get_alert_text(session)
    assert_success(response)
diff --git a/testing/web-platform/tests/webdriver/tests/get_computed_label/__init__.py b/testing/web-platform/tests/webdriver/tests/get_computed_label/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_computed_label/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_computed_label/get.py b/testing/web-platform/tests/webdriver/tests/get_computed_label/get.py
new file mode 100644
index 0000000000..32b4ce33bb
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_computed_label/get.py
@@ -0,0 +1,79 @@
+import pytest
+
+from webdriver import Element
+from webdriver.error import NoSuchAlertException
+
+from tests.support.asserts import assert_error, assert_success
+
+
def get_computed_label(session, element_id):
    """Issue the raw "Get Computed Label" command for *element_id*."""
    endpoint = "session/%s/element/%s/computedlabel" % (
        session.session_id, element_id)
    return session.transport.send("GET", endpoint)
+
+
def test_no_browsing_context(session, closed_frame):
    """With the current browsing context closed, "no such window" is raised."""
    response = get_computed_label(session, "foo")
    assert_error(response, "no such window")
+
+
def test_no_such_element_with_invalid_value(session):
    """An element reference that was never issued yields "no such element"."""
    element = Element("foo", session)

    result = get_computed_label(session, element.id)
    assert_error(result, "no such element")
+
+
@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
def test_no_such_element_from_other_window_handle(session, inline, closed):
    """An element reference from another window (open or closed) is invalid
    in the current window."""
    session.url = inline("<div id='parent'><p/>")
    element = session.find.css("#parent", all=False)

    new_handle = session.new_window()

    if closed:
        session.window.close()

    session.window_handle = new_handle

    result = get_computed_label(session, element.id)
    assert_error(result, "no such element")
+
+
@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
def test_no_such_element_from_other_frame(session, url, closed):
    """An element reference from a child frame is invalid in the parent."""
    session.url = url("/webdriver/tests/support/html/subframe.html")

    frame = session.find.css("#delete-frame", all=False)
    session.switch_frame(frame)

    button = session.find.css("#remove-parent", all=False)
    if closed:
        # Clicking removes the frame, invalidating the reference entirely.
        button.click()

    session.switch_frame("parent")

    result = get_computed_label(session, button.id)
    assert_error(result, "no such element")
+
+
@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
def test_stale_element_reference(session, stale_element, as_frame):
    """A removed element yields "stale element reference"."""
    element = stale_element("<input>", "input", as_frame=as_frame)

    response = get_computed_label(session, element.id)
    assert_error(response, "stale element reference")
+
+
@pytest.mark.parametrize("html,tag,label", [
    ("<button>ok</button>", "button", "ok"),
    ("<button aria-labelledby=\"one two\"></button><div id=one>ok</div><div id=two>go</div>", "button", "ok go"),
    ("<button aria-label=foo>bar</button>", "button", "foo"),
    ("<label><input> foo</label>", "input", "foo"),
    # Fixed: the label was never closed ("<label>" instead of "</label>"),
    # which nested the <input> inside a second, empty label and stopped this
    # case from exercising the for= association at all.
    ("<label for=b>foo</label><input id=b>", "input", "foo")])
def test_get_computed_label(session, inline, html, tag, label):
    """The computed (accessible) label follows the accessible-name
    computation: text content, aria-labelledby, aria-label, wrapping
    <label>, and <label for=...> association."""
    session.url = inline(html)
    element = session.find.css(tag, all=False)
    result = get_computed_label(session, element.id)
    assert_success(result, label)
diff --git a/testing/web-platform/tests/webdriver/tests/get_computed_role/__init__.py b/testing/web-platform/tests/webdriver/tests/get_computed_role/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_computed_role/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_computed_role/get.py b/testing/web-platform/tests/webdriver/tests/get_computed_role/get.py
new file mode 100644
index 0000000000..1e61799dc9
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_computed_role/get.py
@@ -0,0 +1,77 @@
+import pytest
+
+from webdriver import Element
+from webdriver.error import NoSuchAlertException
+
+from tests.support.asserts import assert_error, assert_success
+
+
def get_computed_role(session, element_id):
    """Issue the raw "Get Computed Role" command for *element_id*."""
    endpoint = "session/%s/element/%s/computedrole" % (
        session.session_id, element_id)
    return session.transport.send("GET", endpoint)
+
+
def test_no_browsing_context(session, closed_frame):
    """With the current browsing context closed, "no such window" is raised."""
    response = get_computed_role(session, "foo")
    assert_error(response, "no such window")
+
+
def test_no_such_element_with_invalid_value(session):
    """An element reference that was never issued yields "no such element"."""
    element = Element("foo", session)

    result = get_computed_role(session, element.id)
    assert_error(result, "no such element")
+
+
@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
def test_no_such_element_from_other_window_handle(session, inline, closed):
    """An element reference from another window (open or closed) is invalid
    in the current window."""
    session.url = inline("<div id='parent'><p/>")
    element = session.find.css("#parent", all=False)

    new_handle = session.new_window()

    if closed:
        session.window.close()

    session.window_handle = new_handle

    result = get_computed_role(session, element.id)
    assert_error(result, "no such element")
+
+
@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
def test_no_such_element_from_other_frame(session, url, closed):
    """An element reference from a child frame is invalid in the parent."""
    session.url = url("/webdriver/tests/support/html/subframe.html")

    frame = session.find.css("#delete-frame", all=False)
    session.switch_frame(frame)

    button = session.find.css("#remove-parent", all=False)
    if closed:
        # Clicking removes the frame, invalidating the reference entirely.
        button.click()

    session.switch_frame("parent")

    result = get_computed_role(session, button.id)
    assert_error(result, "no such element")
+
+
@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
def test_stale_element_reference(session, stale_element, as_frame):
    """A removed element yields "stale element reference"."""
    element = stale_element("<input>", "input", as_frame=as_frame)

    response = get_computed_role(session, element.id)
    assert_error(response, "stale element reference")
+
+
@pytest.mark.parametrize("html,tag,expected", [
    ("<li role=menuitem>foo", "li", "menuitem"),
    ("<input role=searchbox>", "input", "searchbox"),
    ("<img role=presentation>", "img", "presentation")])
def test_computed_roles(session, inline, html, tag, expected):
    """An explicit role attribute is reflected in the computed role."""
    session.url = inline(html)
    element = session.find.css(tag, all=False)
    result = get_computed_role(session, element.id)
    assert_success(result, expected)
diff --git a/testing/web-platform/tests/webdriver/tests/get_current_url/__init__.py b/testing/web-platform/tests/webdriver/tests/get_current_url/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_current_url/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_current_url/file.py b/testing/web-platform/tests/webdriver/tests/get_current_url/file.py
new file mode 100644
index 0000000000..ef6ae23835
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_current_url/file.py
@@ -0,0 +1,23 @@
+from tests.support import platform_name
+from tests.support.asserts import assert_success
+
+
def get_current_url(session):
    """Issue the raw "Get Current URL" command and return the response."""
    endpoint = "session/%s/url" % session.session_id
    return session.transport.send("GET", endpoint)
+
+
def test_get_current_url_file_protocol(session, server_config):
    # tests that the browsing context remains the same
    # when navigated privileged documents
    """Navigating to a file:// URL is reflected by "Get Current URL"."""
    path = server_config["doc_root"]
    if platform_name == "windows":
        # Convert the path into the format eg. /c:/foo/bar
        path = "/{}".format(path.replace("\\", "/"))
    url = u"file://{}".format(path)
    session.url = url

    response = get_current_url(session)
    # Some implementations normalize a directory URL with a trailing slash;
    # accept either spelling.
    if response.status == 200 and response.body['value'].endswith('/'):
        url += '/'
    assert_success(response, url)
diff --git a/testing/web-platform/tests/webdriver/tests/get_current_url/get.py b/testing/web-platform/tests/webdriver/tests/get_current_url/get.py
new file mode 100644
index 0000000000..baeab0960b
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_current_url/get.py
@@ -0,0 +1,74 @@
+import pytest
+
+from tests.support import platform_name
+from tests.support.asserts import assert_error, assert_success
+
+
@pytest.fixture
def doc(inline):
    """A minimal single-paragraph document URL used by several tests."""
    return inline("<p>frame")
+
+
def get_current_url(session):
    """Issue the raw "Get Current URL" command and return the response."""
    endpoint = "session/%s/url" % session.session_id
    return session.transport.send("GET", endpoint)
+
+
def test_no_top_browsing_context(session, closed_window):
    """With the top-level browsing context closed, "no such window" is raised."""
    response = get_current_url(session)
    assert_error(response, "no such window")
+
+
def test_no_browsing_context(session, closed_frame, doc):
    """A closed child frame does not break the command: the command operates
    on the top-level browsing context and still succeeds."""
    session.url = doc

    response = get_current_url(session)
    assert_success(response, doc)
+
+
def test_get_current_url_matches_location(session, doc):
    """The returned URL matches the document's location after navigation."""
    session.url = doc

    response = get_current_url(session)
    assert_success(response, doc)
+
+
def test_get_current_url_payload(session):
    """On a fresh session the payload value is a string."""
    session.start()

    response = get_current_url(session)
    value = assert_success(response)
    assert isinstance(value, str)
+
+
def test_get_current_url_special_pages(session):
    """Special pages such as about:blank are reported verbatim."""
    session.url = "about:blank"

    response = get_current_url(session)
    assert_success(response, "about:blank")
+
+
+# TODO(ato): Test for http:// and https:// protocols.
+# We need to expose a fixture for accessing
+# documents served by wptserve in order to test this.
+
+
def test_set_malformed_url(session):
    """Navigating to a non-absolute URL fails with "invalid argument"."""
    response = session.transport.send(
        "POST",
        "session/%s/url" % session.session_id, {"url": "foo"})

    assert_error(response, "invalid argument")
+
+
def test_get_current_url_after_modified_location(session, doc):
    """Fragment navigation (location hash change) is reflected in the URL."""
    session.url = doc

    response = get_current_url(session)
    assert_success(response, doc)

    hash_doc = "{}#foo".format(doc)
    session.url = hash_doc

    response = get_current_url(session)
    assert_success(response, hash_doc)
diff --git a/testing/web-platform/tests/webdriver/tests/get_current_url/iframe.py b/testing/web-platform/tests/webdriver/tests/get_current_url/iframe.py
new file mode 100644
index 0000000000..80a960ce8a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_current_url/iframe.py
@@ -0,0 +1,75 @@
+import pytest
+
+from tests.support.asserts import assert_success
+
+
+"""
+Tests that WebDriver can transcend site origins.
+
+Many modern browsers impose strict cross-origin checks,
+and WebDriver should be able to transcend these.
+
+Although an implementation detail, certain browsers
+also enforce process isolation based on site origin.
+This is known to sometimes cause problems for WebDriver implementations.
+"""
+
+
@pytest.fixture
def frame_doc(inline):
    """Innermost document: a single paragraph."""
    return inline("<p>frame")
+
+
@pytest.fixture
def one_frame_doc(inline, frame_doc):
    """Document embedding frame_doc in one iframe."""
    return inline("<iframe src='%s'></iframe>" % frame_doc)
+
+
@pytest.fixture
def nested_frames_doc(inline, one_frame_doc):
    """Document with two levels of nested iframes."""
    return inline("<iframe src='%s'></iframe>" % one_frame_doc)
+
+
def get_current_url(session):
    """Issue the raw "Get Current URL" command and return the response."""
    endpoint = "session/%s/url" % session.session_id
    return session.transport.send("GET", endpoint)
+
+
def test_iframe(session, one_frame_doc):
    """Even with an iframe focused, the top-level document URL is returned."""
    top_level_doc = one_frame_doc
    session.url = top_level_doc

    frame = session.find.css("iframe", all=False)
    session.switch_frame(frame)
    session.find.css("p", all=False)

    response = get_current_url(session)
    assert_success(response, top_level_doc)
+
+
def test_nested_iframe(session, nested_frames_doc):
    """Two levels of frame switching still report the top-level URL."""
    session.url = nested_frames_doc
    top_level_doc = session.url

    outer_frame = session.find.css("iframe", all=False)
    session.switch_frame(outer_frame)

    inner_frame = session.find.css("iframe", all=False)
    session.switch_frame(inner_frame)
    session.find.css("p", all=False)

    response = get_current_url(session)
    assert_success(response, top_level_doc)
+
+
@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"])
def test_origin(session, inline, iframe, domain):
    """The top-level URL is returned even from a cross-origin child frame."""
    top_level_doc = inline(iframe("<p>frame", domain=domain))

    session.url = top_level_doc
    frame = session.find.css("iframe", all=False)
    session.switch_frame(frame)
    session.find.css("p", all=False)

    response = get_current_url(session)
    assert_success(response, top_level_doc)
diff --git a/testing/web-platform/tests/webdriver/tests/get_current_url/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_current_url/user_prompts.py
new file mode 100644
index 0000000000..d657c18824
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_current_url/user_prompts.py
@@ -0,0 +1,111 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
def get_current_url(session):
    """Issue the raw "Get Current URL" command and return the response."""
    return session.transport.send(
        "GET", "session/{}/url".format(session.session_id))
+
+
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, inline):
    """Fixture: the driver auto-closes the prompt and the command succeeds."""
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        session.url = inline("<p id=1>")
        expected_url = session.url

        create_dialog(dialog_type, text=dialog_type)

        response = get_current_url(session)
        assert_success(response, expected_url)

        # The prompt must have been handled with the expected return value.
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

    return check_user_prompt_closed_without_exception
+
+
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, inline):
    """Fixture: prompt is auto-closed but the command errors ("... and notify")."""
    def check_user_prompt_closed_with_exception(dialog_type, retval):
        session.url = inline("<p id=1>")

        create_dialog(dialog_type, text=dialog_type)

        response = get_current_url(session)
        assert_error(response, "unexpected alert open")

        # Even though the command failed, the prompt was handled.
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

    return check_user_prompt_closed_with_exception
+
+
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
    """Fixture: prompt stays open ("ignore"), command errors; test cleans up."""
    def check_user_prompt_not_closed_but_exception(dialog_type):
        session.url = inline("<p id=1>")

        create_dialog(dialog_type, text=dialog_type)

        response = get_current_url(session)
        assert_error(response, "unexpected alert open")

        # The prompt must still be open; dismiss it so teardown succeeds.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

    return check_user_prompt_not_closed_but_exception
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "accept", prompts are auto-accepted and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "accept and notify", the prompt is accepted but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "dismiss", prompts are auto-dismissed and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "dismiss and notify", the prompt is dismissed but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    """With "ignore", the prompt stays open and the command errors."""
    check_user_prompt_not_closed_but_exception(dialog_type)
+
+
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    """Without a capability, the default is "dismiss and notify"."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_attribute/__init__.py b/testing/web-platform/tests/webdriver/tests/get_element_attribute/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_attribute/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_attribute/get.py b/testing/web-platform/tests/webdriver/tests/get_element_attribute/get.py
new file mode 100644
index 0000000000..87fd5f3333
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_attribute/get.py
@@ -0,0 +1,156 @@
+import pytest
+
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_success
+
+
def get_element_attribute(session, element, attr):
    """Issue the raw "Get Element Attribute" command for *attr*."""
    endpoint = "session/%s/element/%s/attribute/%s" % (
        session.session_id, element, attr)
    return session.transport.send("GET", endpoint)
+
+
def test_no_top_browsing_context(session, closed_window):
    """With the window closed both a valid and an unknown element reference
    report "no such window"; after switching back the reference is stale
    enough to be "no such element"."""
    original_handle, element = closed_window
    response = get_element_attribute(session, element.id, "id")
    assert_error(response, "no such window")
    response = get_element_attribute(session, "foo", "id")
    assert_error(response, "no such window")
    session.window_handle = original_handle
    response = get_element_attribute(session, element.id, "id")
    assert_error(response, "no such element")
+
+
def test_no_browsing_context(session, closed_frame):
    """With the current browsing context closed, "no such window" is raised."""
    response = get_element_attribute(session, "foo", "id")
    assert_error(response, "no such window")
+
+
def test_no_such_element_with_invalid_value(session):
    """An element reference that was never issued yields "no such element"."""
    element = Element("foo", session)

    response = get_element_attribute(session, element.id, "id")
    assert_error(response, "no such element")
+
+
@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
def test_no_such_element_from_other_window_handle(session, inline, closed):
    """An element reference from another window (open or closed) is invalid
    in the current window."""
    session.url = inline("<div id='parent'><p/>")
    element = session.find.css("#parent", all=False)

    new_handle = session.new_window()

    if closed:
        session.window.close()

    session.window_handle = new_handle

    response = get_element_attribute(session, element.id, "id")
    assert_error(response, "no such element")
+
+
@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
def test_no_such_element_from_other_frame(session, url, closed):
    """An element reference from a child frame is invalid in the parent."""
    session.url = url("/webdriver/tests/support/html/subframe.html")

    frame = session.find.css("#delete-frame", all=False)
    session.switch_frame(frame)

    button = session.find.css("#remove-parent", all=False)
    if closed:
        # Clicking removes the frame, invalidating the reference entirely.
        button.click()

    session.switch_frame("parent")
    response = get_element_attribute(session, button.id, "id")
    assert_error(response, "no such element")
+
+
@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
def test_stale_element_reference(session, stale_element, as_frame):
    """A removed element yields "stale element reference"."""
    element = stale_element("<input>", "input", as_frame=as_frame)

    result = get_element_attribute(session, element.id, "id")
    assert_error(result, "stale element reference")
+
+
def test_normal(session, inline):
    # 13.2 Step 5
    """Get Element Attribute returns the content attribute, not the live
    IDL property."""
    session.url = inline("<input type=checkbox>")
    element = session.find.css("input", all=False)
    # Fixed: the original queried the meaningless attribute name "input",
    # which passes vacuously for any element; "checked" is the attribute
    # the property-vs-attribute comment below actually refers to.
    result = get_element_attribute(session, element.id, "checked")
    assert_success(result, None)

    # Check we are not returning the property which will have a different value
    assert session.execute_script("return document.querySelector('input').checked") is False
    element.click()
    assert session.execute_script("return document.querySelector('input').checked") is True
    # The checkbox is now checked (property true), but the markup never had
    # a "checked" content attribute, so the command must still return null.
    result = get_element_attribute(session, element.id, "checked")
    assert_success(result, None)
+
+
@pytest.mark.parametrize("tag,attrs", [
    ("audio", ["autoplay", "controls", "loop", "muted"]),
    ("button", ["autofocus", "disabled", "formnovalidate"]),
    ("details", ["open"]),
    ("dialog", ["open"]),
    ("fieldset", ["disabled"]),
    ("form", ["novalidate"]),
    ("iframe", ["allowfullscreen"]),
    ("img", ["ismap"]),
    ("input", [
        "autofocus", "checked", "disabled", "formnovalidate", "multiple", "readonly", "required"
    ]),
    ("menuitem", ["checked", "default", "disabled"]),
    ("ol", ["reversed"]),
    ("optgroup", ["disabled"]),
    ("option", ["disabled", "selected"]),
    ("script", ["async", "defer"]),
    ("select", ["autofocus", "disabled", "multiple", "required"]),
    ("textarea", ["autofocus", "disabled", "readonly", "required"]),
    ("track", ["default"]),
    ("video", ["autoplay", "controls", "loop", "muted"])
])
def test_boolean_attribute(session, inline, tag, attrs):
    """A present boolean attribute is reported as the string "true"."""
    for attr in attrs:
        session.url = inline("<{0} {1}>".format(tag, attr))
        element = session.find.css(tag, all=False)
        result = get_element_attribute(session, element.id, attr)
        assert_success(result, "true")
+
+
def test_global_boolean_attributes(session, inline):
    """Global boolean attributes (hidden, itemscope) report "true" when
    present and null when absent."""
    session.url = inline("<p hidden>foo")
    element = session.find.css("p", all=False)
    result = get_element_attribute(session, element.id, "hidden")

    assert_success(result, "true")

    session.url = inline("<p>foo")
    element = session.find.css("p", all=False)
    result = get_element_attribute(session, element.id, "hidden")
    assert_success(result, None)

    session.url = inline("<p itemscope>foo")
    element = session.find.css("p", all=False)
    result = get_element_attribute(session, element.id, "itemscope")

    assert_success(result, "true")

    session.url = inline("<p>foo")
    element = session.find.css("p", all=False)
    result = get_element_attribute(session, element.id, "itemscope")
    assert_success(result, None)
+
+
@pytest.mark.parametrize("is_relative", [True, False], ids=["relative", "absolute"])
def test_anchor_href(session, inline, url, is_relative):
    """The href attribute is returned as written (relative stays relative),
    unlike the href property which would be absolutized."""
    href = "/foo.html" if is_relative else url("/foo.html")

    session.url = inline("<a href='{}'>foo</a>".format(href))
    element = session.find.css("a", all=False)

    response = get_element_attribute(session, element.id, "href")
    assert_success(response, href)
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_attribute/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_element_attribute/user_prompts.py
new file mode 100644
index 0000000000..009cb1e5fa
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_attribute/user_prompts.py
@@ -0,0 +1,117 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
+
+
def get_element_attribute(session, element, attr):
    """Issue the raw "Get Element Attribute" command for *attr*."""
    endpoint = "session/%s/element/%s/attribute/%s" % (
        session.session_id, element, attr)
    return session.transport.send("GET", endpoint)
+
+
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, inline):
    """Fixture: the driver auto-closes the prompt and the command succeeds."""
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        session.url = inline("<input id=foo>")
        element = session.find.css("#foo", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = get_element_attribute(session, element.id, "id")
        assert_success(response, "foo")

        # The prompt must have been handled with the expected return value.
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

    return check_user_prompt_closed_without_exception
+
+
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, inline):
    """Fixture: prompt is auto-closed but the command errors ("... and notify")."""
    def check_user_prompt_closed_with_exception(dialog_type, retval):
        session.url = inline("<input id=foo>")
        element = session.find.css("#foo", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = get_element_attribute(session, element.id, "id")
        assert_error(response, "unexpected alert open")

        # Even though the command failed, the prompt was handled.
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

    return check_user_prompt_closed_with_exception
+
+
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
    """Fixture: prompt stays open ("ignore"), command errors; test cleans up."""
    def check_user_prompt_not_closed_but_exception(dialog_type):
        session.url = inline("<input id=foo>")
        element = session.find.css("#foo", all=False)

        create_dialog(dialog_type, text=dialog_type)

        response = get_element_attribute(session, element.id, "id")
        assert_error(response, "unexpected alert open")

        # The prompt must still be open; dismiss it so teardown succeeds.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

    return check_user_prompt_not_closed_but_exception
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "accept", prompts are auto-accepted and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "accept and notify", the prompt is accepted but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
    """With "dismiss", prompts are auto-dismissed and the command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """With "dismiss and notify", the prompt is dismissed but the command errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    """With "ignore", the prompt stays open and the command errors."""
    check_user_prompt_not_closed_but_exception(dialog_type)
+
+
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    """Without a capability, the default is "dismiss and notify"."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_css_value/__init__.py b/testing/web-platform/tests/webdriver/tests/get_element_css_value/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_css_value/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_css_value/get.py b/testing/web-platform/tests/webdriver/tests/get_element_css_value/get.py
new file mode 100644
index 0000000000..d178fc25f1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_css_value/get.py
@@ -0,0 +1,97 @@
+import pytest
+
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def get_element_css_value(session, element_id, prop):
+    # Issue "Get Element CSS Value" over the raw wire protocol so that error
+    # responses can be asserted on instead of raising in the client.
+    return session.transport.send(
+        "GET",
+        "session/{session_id}/element/{element_id}/css/{prop}".format(
+            session_id=session.session_id,
+            element_id=element_id,
+            prop=prop
+        )
+    )
+
+
+def test_no_top_browsing_context(session, closed_window):
+    # With the top-level window closed, both a known and an unknown element
+    # id must yield "no such window".
+    original_handle, element = closed_window
+    response = get_element_css_value(session, element.id, "display")
+    assert_error(response, "no such window")
+    response = get_element_css_value(session, "foo", "bar")
+    assert_error(response, "no such window")
+
+    # Back in a live window, the element from the closed window is gone.
+    session.window_handle = original_handle
+    response = get_element_css_value(session, element.id, "display")
+    assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+    # Current browsing context (a frame) was discarded.
+    response = get_element_css_value(session, "foo", "bar")
+    assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+    # An element reference that the remote end never created.
+    element = Element("foo", session)
+
+    response = get_element_css_value(session, element.id, "display")
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+    # Element references are scoped to the window they were located in.
+    session.url = inline("<div id='parent'><p/>")
+    element = session.find.css("#parent", all=False)
+
+    new_handle = session.new_window()
+
+    if closed:
+        session.window.close()
+
+    session.window_handle = new_handle
+
+    response = get_element_css_value(session, element.id, "display")
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+    # Element references are scoped to the frame they were located in.
+    session.url = url("/webdriver/tests/support/html/subframe.html")
+
+    frame = session.find.css("#delete-frame", all=False)
+    session.switch_frame(frame)
+
+    button = session.find.css("#remove-parent", all=False)
+    if closed:
+        # Removes the frame from the parent document.
+        button.click()
+
+    session.switch_frame("parent")
+
+    response = get_element_css_value(session, button.id, "display")
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+    element = stale_element("<input>", "input", as_frame=as_frame)
+
+    result = get_element_css_value(session, element.id, "display")
+    assert_error(result, "stale element reference")
+
+
+def test_property_name_value(session, inline):
+    # The inline style is reflected in the returned computed value.
+    session.url = inline("""<input style="display: block">""")
+    element = session.find.css("input", all=False)
+
+    result = get_element_css_value(session, element.id, "display")
+    assert_success(result, "block")
+
+
+def test_property_name_not_existent(session, inline):
+    # Unknown CSS property names resolve to "" rather than an error.
+    session.url = inline("<input>")
+    element = session.find.css("input", all=False)
+
+    result = get_element_css_value(session, element.id, "foo")
+    assert_success(result, "")
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_css_value/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_element_css_value/user_prompts.py
new file mode 100644
index 0000000000..b1f9a3fb0a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_css_value/user_prompts.py
@@ -0,0 +1,120 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
+
+
+def get_element_css_value(session, element_id, prop):
+    # Raw wire-protocol call for "Get Element CSS Value" (mirrors get.py).
+    return session.transport.send(
+        "GET",
+        "session/{session_id}/element/{element_id}/css/{prop}".format(
+            session_id=session.session_id,
+            element_id=element_id,
+            prop=prop
+        )
+    )
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+    # Prompt is auto-handled: the command succeeds and returns the value.
+    def check_user_prompt_closed_without_exception(dialog_type, retval):
+        session.url = inline("""<input style="display: block">""")
+        element = session.find.css("input", all=False)
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_element_css_value(session, element.id, "display")
+        assert_success(response, "block")
+
+        # Dialog was closed with the expected return value.
+        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+    return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+    # Prompt is auto-handled but the command reports the prompt via an error.
+    def check_user_prompt_closed_with_exception(dialog_type, retval):
+        session.url = inline("""<input style="display: block">""")
+        element = session.find.css("input", all=False)
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_element_css_value(session, element.id, "display")
+        assert_error(response, "unexpected alert open")
+
+        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+    return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+    # Prompt is left open; the command errors; dismiss manually for cleanup.
+    def check_user_prompt_not_closed_but_exception(dialog_type):
+        session.url = inline("""<input style="display: block">""")
+        element = session.find.css("input", all=False)
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_element_css_value(session, element.id, "display")
+        assert_error(response, "unexpected alert open")
+
+        assert session.alert.text == dialog_type
+        session.alert.dismiss()
+
+    return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", True),
+    ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+    # "accept": prompt closed (confirm -> True, prompt -> ""), no error.
+    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", True),
+    ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", False),
+    ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+    # "dismiss": prompt closed (confirm -> False, prompt -> None), no error.
+    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", False),
+    ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+    check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", False),
+    ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+    # No capability set: same expectations as "dismiss and notify".
+    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_property/__init__.py b/testing/web-platform/tests/webdriver/tests/get_element_property/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_property/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_property/get.py b/testing/web-platform/tests/webdriver/tests/get_element_property/get.py
new file mode 100644
index 0000000000..7d9d7083ac
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_property/get.py
@@ -0,0 +1,209 @@
+import pytest
+
+from webdriver import Element, Frame, ShadowRoot, Window
+
+# NOTE(review): assert_same_element does not appear to be used in this file —
+# confirm whether the import is needed.
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+
+
+def get_element_property(session, element_id, prop):
+    # Issue "Get Element Property" over the raw wire protocol so that error
+    # responses can be asserted on instead of raising in the client.
+    return session.transport.send(
+        "GET", "session/{session_id}/element/{element_id}/property/{prop}".format(
+            session_id=session.session_id,
+            element_id=element_id,
+            prop=prop))
+
+
+def test_no_top_browsing_context(session, closed_window):
+    # With the top-level window closed, both a known and an unknown element
+    # id must yield "no such window".
+    original_handle, element = closed_window
+    response = get_element_property(session, element.id, "value")
+    assert_error(response, "no such window")
+    response = get_element_property(session, "foo", "id")
+    assert_error(response, "no such window")
+
+    # Back in a live window, the element from the closed window is gone.
+    session.window_handle = original_handle
+    response = get_element_property(session, element.id, "value")
+    assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+    # Current browsing context (a frame) was discarded.
+    response = get_element_property(session, "foo", "id")
+    assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+    # An element reference that the remote end never created.
+    element = Element("foo", session)
+
+    response = get_element_property(session, element.id, "id")
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+    # Element references are scoped to the window they were located in.
+    session.url = inline("<div id='parent'><p/>")
+    element = session.find.css("#parent", all=False)
+
+    new_handle = session.new_window()
+
+    if closed:
+        session.window.close()
+
+    session.window_handle = new_handle
+
+    response = get_element_property(session, element.id, "id")
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+    # Element references are scoped to the frame they were located in.
+    session.url = url("/webdriver/tests/support/html/subframe.html")
+
+    frame = session.find.css("#delete-frame", all=False)
+    session.switch_frame(frame)
+
+    button = session.find.css("#remove-parent", all=False)
+    if closed:
+        # Removes the frame from the parent document.
+        button.click()
+
+    session.switch_frame("parent")
+
+    response = get_element_property(session, button.id, "id")
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+    element = stale_element("<input>", "input", as_frame=as_frame)
+
+    result = get_element_property(session, element.id, "id")
+    assert_error(result, "stale element reference")
+
+
+def test_property_non_existent(session, inline):
+    # A missing property returns null over the protocol and None in script.
+    session.url = inline("<input>")
+    element = session.find.css("input", all=False)
+
+    response = get_element_property(session, element.id, "foo")
+    assert_success(response, None)
+    assert session.execute_script("return arguments[0].foo", args=(element,)) is None
+
+
+def test_content_attribute(session, inline):
+    # The value content attribute initializes the "value" property.
+    session.url = inline("<input value=foobar>")
+    element = session.find.css("input", all=False)
+
+    response = get_element_property(session, element.id, "value")
+    assert_success(response, "foobar")
+
+
+def test_idl_attribute(session, inline):
+    # The command reads the IDL attribute, not the content attribute:
+    # a script-updated value must be returned.
+    session.url = inline("<input value=foo>")
+    element = session.find.css("input", all=False)
+    session.execute_script("""arguments[0].value = "bar";""", args=(element,))
+
+    response = get_element_property(session, element.id, "value")
+    assert_success(response, "bar")
+
+
+@pytest.mark.parametrize("js_primitive,py_primitive", [
+    ("\"foobar\"", "foobar"),
+    (42, 42),
+    ([], []),
+    ({}, {}),
+    ("null", None),
+    ("undefined", None),
+])
+def test_primitives(session, inline, js_primitive, py_primitive):
+    # JS primitives set in the page are serialized to their JSON equivalents;
+    # both null and undefined map to None.
+    session.url = inline("""
+        <input>
+
+        <script>
+        const input = document.querySelector("input");
+        input.foobar = {js_primitive};
+        </script>
+        """.format(js_primitive=js_primitive))
+    element = session.find.css("input", all=False)
+
+    response = get_element_property(session, element.id, "foobar")
+    assert_success(response, py_primitive)
+
+
+@pytest.mark.parametrize("js_primitive,py_primitive", [
+    ("\"foobar\"", "foobar"),
+    (42, 42),
+    ([], []),
+    ({}, {}),
+    ("null", None),
+    ("undefined", None),
+])
+def test_primitives_set_by_execute_script(session, inline, js_primitive, py_primitive):
+    # Same as test_primitives, but the property is set via Execute Script.
+    session.url = inline("<input>")
+    element = session.find.css("input", all=False)
+    session.execute_script("arguments[0].foobar = {}".format(js_primitive), args=(element,))
+
+    response = get_element_property(session, element.id, "foobar")
+    assert_success(response, py_primitive)
+
+
+@pytest.mark.parametrize("js_web_reference,py_web_reference", [
+    ("element", Element),
+    ("frame", Frame),
+    ("shadowRoot", ShadowRoot),
+    ("window", Window),
+])
+def test_web_reference(session, inline, js_web_reference, py_web_reference):
+    # DOM node / window / shadow-root valued properties must be serialized as
+    # the corresponding web reference objects (identifier key -> string id).
+    session.url = inline("""
+        <div id="parent"></div>
+        <p id="element"></p>
+        <iframe id="frame"></iframe>
+        <shadow-element id="custom"></shadow-element>
+
+        <script>
+            customElements.define("shadow-element",
+                class extends HTMLElement {
+                    constructor() {
+                        super();
+                        this.attachShadow({ mode: "open" }).innerHTML = "<p>foo";
+                    }
+                }
+            );
+
+            const parent = document.getElementById("parent");
+            parent.__element = document.getElementById("element");
+            parent.__frame = document.getElementById("frame").contentWindow;
+            parent.__shadowRoot = document.getElementById("custom").shadowRoot;
+            parent.__window = document.defaultView;
+        </script>
+        """)
+
+    elem = session.find.css("#parent", all=False)
+    response = get_element_property(session, elem.id, "__{}".format(js_web_reference))
+    value = assert_success(response)
+
+    assert isinstance(value, dict)
+    assert py_web_reference.identifier in value
+    assert isinstance(value[py_web_reference.identifier], str)
+
+
+def test_mutated_element(session, inline):
+    # Clicking toggles the "checked" IDL attribute without adding the content
+    # attribute; the command must reflect the IDL state (True).
+    session.url = inline("<input type=checkbox>")
+    element = session.find.css("input", all=False)
+    element.click()
+
+    checked = session.execute_script("""
+        return arguments[0].hasAttribute('checked')
+        """, args=(element,))
+    assert checked is False
+
+    response = get_element_property(session, element.id, "checked")
+    assert_success(response, True)
+
+
+@pytest.mark.parametrize("is_relative", [True, False], ids=["relative", "absolute"])
+def test_anchor_href(session, inline, url, is_relative):
+    # The href property is always the resolved absolute URL.
+    href = "/foo.html" if is_relative else url("/foo.html")
+
+    session.url = inline("<a href='{}'>foo</a>".format(href))
+    element = session.find.css("a", all=False)
+
+    response = get_element_property(session, element.id, "href")
+    assert_success(response, url("/foo.html"))
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_property/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_element_property/user_prompts.py
new file mode 100644
index 0000000000..e5e7694786
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_property/user_prompts.py
@@ -0,0 +1,115 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
+
+
+def get_element_property(session, element_id, name):
+    # Raw wire-protocol call for "Get Element Property" (mirrors get.py).
+    return session.transport.send(
+        "GET", "session/{session_id}/element/{element_id}/property/{name}".format(
+            session_id=session.session_id, element_id=element_id, name=name))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+    # Prompt is auto-handled: the command succeeds and returns the value.
+    def check_user_prompt_closed_without_exception(dialog_type, retval):
+        session.url = inline("<input id=foo>")
+        element = session.find.css("#foo", all=False)
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_element_property(session, element.id, "id")
+        assert_success(response, "foo")
+
+        # Dialog was closed with the expected return value.
+        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+    return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+    # Prompt is auto-handled but the command reports the prompt via an error.
+    def check_user_prompt_closed_with_exception(dialog_type, retval):
+        session.url = inline("<input id=foo>")
+        element = session.find.css("#foo", all=False)
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_element_property(session, element.id, "id")
+        assert_error(response, "unexpected alert open")
+
+        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+    return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+    # Prompt is left open; the command errors; dismiss manually for cleanup.
+    def check_user_prompt_not_closed_but_exception(dialog_type):
+        session.url = inline("<input id=foo>")
+        element = session.find.css("#foo", all=False)
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_element_property(session, element.id, "id")
+        assert_error(response, "unexpected alert open")
+
+        assert session.alert.text == dialog_type
+        session.alert.dismiss()
+
+    return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", True),
+    ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+    # "accept": prompt closed (confirm -> True, prompt -> ""), no error.
+    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", True),
+    ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", False),
+    ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+    # "dismiss": prompt closed (confirm -> False, prompt -> None), no error.
+    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", False),
+    ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+    check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", False),
+    ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+    # No capability set: same expectations as "dismiss and notify".
+    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_rect/__init__.py b/testing/web-platform/tests/webdriver/tests/get_element_rect/__init__.py
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_rect/__init__.py
@@ -0,0 +1 @@
+
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_rect/get.py b/testing/web-platform/tests/webdriver/tests/get_element_rect/get.py
new file mode 100644
index 0000000000..113bc83838
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_rect/get.py
@@ -0,0 +1,89 @@
+import pytest
+
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import element_rect
+
+
+def get_element_rect(session, element_id):
+    # Issue "Get Element Rect" over the raw wire protocol so that error
+    # responses can be asserted on instead of raising in the client.
+    return session.transport.send(
+        "GET",
+        "session/{session_id}/element/{element_id}/rect".format(
+            session_id=session.session_id,
+            element_id=element_id,
+        )
+    )
+
+
+def test_no_top_browsing_context(session, closed_window):
+    # With the top-level window closed, both a known and an unknown element
+    # id must yield "no such window".
+    original_handle, element = closed_window
+    response = get_element_rect(session, element.id)
+    assert_error(response, "no such window")
+    response = get_element_rect(session, "foo")
+    assert_error(response, "no such window")
+
+    # Back in a live window, the element from the closed window is gone.
+    session.window_handle = original_handle
+    response = get_element_rect(session, element.id)
+    assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+    # Current browsing context (a frame) was discarded.
+    response = get_element_rect(session, "foo")
+    assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+    # An element reference that the remote end never created.
+    element = Element("foo", session)
+
+    response = get_element_rect(session, element.id)
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+    # Element references are scoped to the window they were located in.
+    session.url = inline("<div id='parent'><p/>")
+    element = session.find.css("#parent", all=False)
+
+    new_handle = session.new_window()
+
+    if closed:
+        session.window.close()
+
+    session.window_handle = new_handle
+
+    response = get_element_rect(session, element.id)
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+    # Element references are scoped to the frame they were located in.
+    session.url = url("/webdriver/tests/support/html/subframe.html")
+
+    frame = session.find.css("#delete-frame", all=False)
+    session.switch_frame(frame)
+
+    button = session.find.css("#remove-parent", all=False)
+    if closed:
+        # Removes the frame from the parent document.
+        button.click()
+
+    session.switch_frame("parent")
+
+    response = get_element_rect(session, button.id)
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+    element = stale_element("<input>", "input", as_frame=as_frame)
+
+    result = get_element_rect(session, element.id)
+    assert_error(result, "stale element reference")
+
+
+def test_basic(session, inline):
+    # The returned rect must match the script-computed rect of the element.
+    session.url = inline("<input>")
+    element = session.find.css("input", all=False)
+
+    result = get_element_rect(session, element.id)
+    assert_success(result, element_rect(session, element))
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_rect/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_element_rect/user_prompts.py
new file mode 100644
index 0000000000..2013160338
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_rect/user_prompts.py
@@ -0,0 +1,120 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
+from tests.support.helpers import element_rect
+
+
+def get_element_rect(session, element_id):
+    # Raw wire-protocol call for "Get Element Rect" (mirrors get.py).
+    return session.transport.send(
+        "GET",
+        "session/{session_id}/element/{element_id}/rect".format(
+            session_id=session.session_id,
+            element_id=element_id,
+        )
+    )
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+    # Prompt is auto-handled: the command succeeds and returns the rect.
+    def check_user_prompt_closed_without_exception(dialog_type, retval):
+        session.url = inline("<input>")
+        element = session.find.css("input", all=False)
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_element_rect(session, element.id)
+        assert_success(response, element_rect(session, element))
+
+        # Dialog was closed with the expected return value.
+        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+    return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+    # Prompt is auto-handled but the command reports the prompt via an error.
+    def check_user_prompt_closed_with_exception(dialog_type, retval):
+        session.url = inline("<input>")
+        element = session.find.css("input", all=False)
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_element_rect(session, element.id)
+        assert_error(response, "unexpected alert open")
+
+        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+    return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+    # Prompt is left open; the command errors; dismiss manually for cleanup.
+    def check_user_prompt_not_closed_but_exception(dialog_type):
+        session.url = inline("<input>")
+        element = session.find.css("input", all=False)
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_element_rect(session, element.id)
+        assert_error(response, "unexpected alert open")
+
+        assert session.alert.text == dialog_type
+        session.alert.dismiss()
+
+    return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", True),
+    ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+    # "accept": prompt closed (confirm -> True, prompt -> ""), no error.
+    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", True),
+    ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", False),
+    ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+    # "dismiss": prompt closed (confirm -> False, prompt -> None), no error.
+    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", False),
+    ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+    check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+    ("alert", None),
+    ("confirm", False),
+    ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+    # No capability set: same expectations as "dismiss and notify".
+    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/__init__.py b/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/conftest.py b/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/conftest.py
new file mode 100644
index 0000000000..4ca71025d6
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/conftest.py
@@ -0,0 +1,22 @@
+import pytest
+
+@pytest.fixture
+def checkbox_dom(inline):
+    # Markup for a custom element with an open shadow root containing a
+    # checkbox. Returned as a raw string: callers pass it through inline()
+    # themselves (e.g. session.url = inline(checkbox_dom)).
+    # NOTE(review): the `inline` fixture argument is unused here — confirm
+    # whether it can be dropped.
+    return """
+        <style>
+            custom-checkbox-element {
+                display:block; width:20px; height:20px;
+            }
+        </style>
+        <custom-checkbox-element></custom-checkbox-element>
+        <script>
+            customElements.define('custom-checkbox-element',
+                class extends HTMLElement {
+                    constructor() {
+                            super();
+                            this.attachShadow({mode: 'open'}).innerHTML = `
+                                <div><input type="checkbox"/></div>
+                            `;
+                        }
+                    });
+        </script>"""
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/get.py b/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/get.py
new file mode 100644
index 0000000000..55b2089a67
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/get.py
@@ -0,0 +1,96 @@
+import pytest
+
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+
+
+def get_shadow_root(session, element_id):
+    # Issue "Get Element Shadow Root" over the raw wire protocol so that
+    # error responses can be asserted on instead of raising in the client.
+    return session.transport.send(
+        "GET", "session/{session_id}/element/{element_id}/shadow".format(
+            session_id=session.session_id,
+            element_id=element_id))
+
+
+def test_no_top_browsing_context(session, closed_window):
+    # With the top-level window closed, both a known and an unknown element
+    # id must yield "no such window".
+    original_handle, element = closed_window
+    response = get_shadow_root(session, element.id)
+    assert_error(response, "no such window")
+    response = get_shadow_root(session, "foo")
+    assert_error(response, "no such window")
+
+    # Back in a live window, the element from the closed window is gone.
+    session.window_handle = original_handle
+    response = get_shadow_root(session, element.id)
+    assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+    # Current browsing context (a frame) was discarded.
+    response = get_shadow_root(session, "foo")
+    assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+    # An element reference that the remote end never created.
+    element = Element("foo", session)
+
+    response = get_shadow_root(session, element.id)
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+    # Element references are scoped to the window they were located in.
+    session.url = inline("<div id='parent'><p/>")
+    element = session.find.css("#parent", all=False)
+
+    new_handle = session.new_window()
+
+    if closed:
+        session.window.close()
+
+    session.window_handle = new_handle
+
+    response = get_shadow_root(session, element.id)
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+    # Element references are scoped to the frame they were located in.
+    session.url = url("/webdriver/tests/support/html/subframe.html")
+
+    frame = session.find.css("#delete-frame", all=False)
+    session.switch_frame(frame)
+
+    button = session.find.css("#remove-parent", all=False)
+    if closed:
+        # Removes the frame from the parent document.
+        button.click()
+
+    session.switch_frame("parent")
+
+    response = get_shadow_root(session, button.id)
+    assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, checkbox_dom, as_frame):
+    element = stale_element(checkbox_dom, "custom-checkbox-element", as_frame=as_frame)
+
+    result = get_shadow_root(session, element.id)
+    assert_error(result, "stale element reference")
+
+
+def test_get_shadow_root(session, inline, checkbox_dom):
+    session.url = inline(checkbox_dom)
+    expected = session.execute_script(
+        "return document.querySelector('custom-checkbox-element').shadowRoot.host")
+    custom_element = session.find.css("custom-checkbox-element", all=False)
+    response = get_shadow_root(session, custom_element.id)
+    value = assert_success(response)
+    assert isinstance(value, dict)
+    # "shadow-6066-11e4-a52e-4f735466cecf" is the shadow root reference key
+    # used in the serialized response.
+    assert "shadow-6066-11e4-a52e-4f735466cecf" in value
+    assert_same_element(session, custom_element, expected)
+
+
+def test_no_shadow_root(session, inline):
+    # An element without an attached shadow root yields a dedicated error.
+    session.url = inline("<div><p>no shadow root</p></div>")
+    element = session.find.css("div", all=False)
+    response = get_shadow_root(session, element.id)
+    assert_error(response, "no such shadow root")
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/user_prompts.py
new file mode 100644
index 0000000000..b94650a6f0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_shadow_root/user_prompts.py
@@ -0,0 +1,117 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
+
+
+def get_shadow_root(session, element_id):
+ return session.transport.send(
+ "GET", "session/{session_id}/element/{element_id}/shadow".format(
+ session_id=session.session_id,
+ element_id=element_id))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline, checkbox_dom):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline(checkbox_dom)
+ element = session.find.css("custom-checkbox-element", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_shadow_root(session, element.id)
+ value = assert_success(response)
+ assert isinstance(value, dict)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline, checkbox_dom):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline(checkbox_dom)
+ element = session.find.css("custom-checkbox-element", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_shadow_root(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline, checkbox_dom):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline(checkbox_dom)
+ element = session.find.css("custom-checkbox-element", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_shadow_root(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_tag_name/__init__.py b/testing/web-platform/tests/webdriver/tests/get_element_tag_name/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_tag_name/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_tag_name/get.py b/testing/web-platform/tests/webdriver/tests/get_element_tag_name/get.py
new file mode 100644
index 0000000000..bdfebc2ce7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_tag_name/get.py
@@ -0,0 +1,85 @@
+import pytest
+
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def get_element_tag_name(session, element_id):
+ return session.transport.send(
+ "GET", "session/{session_id}/element/{element_id}/name".format(
+ session_id=session.session_id,
+ element_id=element_id))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ original_handle, element = closed_window
+ response = get_element_tag_name(session, element.id)
+ assert_error(response, "no such window")
+ response = get_element_tag_name(session, "foo")
+ assert_error(response, "no such window")
+
+ session.window_handle = original_handle
+ response = get_element_tag_name(session, element.id)
+ assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = get_element_tag_name(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+ element = Element("foo", session)
+
+ response = get_element_tag_name(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ response = get_element_tag_name(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ response = get_element_tag_name(session, button.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<input>", "input", as_frame=as_frame)
+
+ result = get_element_tag_name(session, element.id)
+ assert_error(result, "stale element reference")
+
+
+def test_get_element_tag_name(session, inline):
+ session.url = inline("<input id=foo>")
+ element = session.find.css("input", all=False)
+
+ result = get_element_tag_name(session, element.id)
+ assert_success(result, "input")
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_tag_name/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_element_tag_name/user_prompts.py
new file mode 100644
index 0000000000..89697d0ad6
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_tag_name/user_prompts.py
@@ -0,0 +1,114 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
+
+
+def get_element_tag_name(session, element_id):
+ return session.transport.send("GET", "session/{session_id}/element/{element_id}/name".format(
+ session_id=session.session_id, element_id=element_id))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<input id=foo>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_element_tag_name(session, element.id)
+ assert_success(response, "input")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<input id=foo>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_element_tag_name(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<input id=foo>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_element_tag_name(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_text/__init__.py b/testing/web-platform/tests/webdriver/tests/get_element_text/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_text/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_text/get.py b/testing/web-platform/tests/webdriver/tests/get_element_text/get.py
new file mode 100644
index 0000000000..b2c8a09f8c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_text/get.py
@@ -0,0 +1,99 @@
+import pytest
+
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def get_element_text(session, element_id):
+ return session.transport.send(
+ "GET", "session/{session_id}/element/{element_id}/text".format(
+ session_id=session.session_id,
+ element_id=element_id))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ original_handle, element = closed_window
+ response = get_element_text(session, element.id)
+ assert_error(response, "no such window")
+ response = get_element_text(session, "foo")
+ assert_error(response, "no such window")
+
+ session.window_handle = original_handle
+ response = get_element_text(session, element.id)
+ assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = get_element_text(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+ element = Element("foo", session)
+
+ response = get_element_text(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ response = get_element_text(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ response = get_element_text(session, button.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<input>", "input", as_frame=as_frame)
+
+ response = get_element_text(session, element.id)
+ assert_error(response, "stale element reference")
+
+
+def test_getting_text_of_a_non_existent_element_is_an_error(session, inline):
+ session.url = inline("""<body>Hello world</body>""")
+
+ result = get_element_text(session, "foo")
+ assert_error(result, "no such element")
+
+
+def test_read_element_text(session, inline):
+ session.url = inline("Before f<span id='id'>oo</span> after")
+ element = session.find.css("#id", all=False)
+
+ result = get_element_text(session, element.id)
+ assert_success(result, "oo")
+
+
+def test_pretty_print_xml(session, inline):
+ session.url = inline("<xml><foo>che<bar>ese</bar></foo></xml>", doctype="xml")
+
+ elem = session.find.css("foo", all=False)
+ assert elem.text == "cheese"
diff --git a/testing/web-platform/tests/webdriver/tests/get_element_text/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_element_text/user_prompts.py
new file mode 100644
index 0000000000..9f0bb386cd
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_element_text/user_prompts.py
@@ -0,0 +1,116 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
+
+
+def get_element_text(session, element_id):
+ return session.transport.send(
+ "GET", "session/{session_id}/element/{element_id}/text".format(
+ session_id=session.session_id,
+ element_id=element_id))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<p id=foo>bar</p>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_element_text(session, element.id)
+ assert_success(response, "bar")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<p id=foo>bar</p>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_element_text(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<p id=foo>bar</p>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_element_text(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_named_cookie/__init__.py b/testing/web-platform/tests/webdriver/tests/get_named_cookie/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_named_cookie/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_named_cookie/get.py b/testing/web-platform/tests/webdriver/tests/get_named_cookie/get.py
new file mode 100644
index 0000000000..d1e83b6a81
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_named_cookie/get.py
@@ -0,0 +1,145 @@
+import pytest
+
+from datetime import datetime, timedelta
+
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import clear_all_cookies
+
+
+def get_named_cookie(session, name):
+ return session.transport.send(
+ "GET", "session/{session_id}/cookie/{name}".format(
+ session_id=session.session_id,
+ name=name))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = get_named_cookie(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = get_named_cookie(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_get_named_session_cookie(session, url):
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+ session.execute_script("document.cookie = 'foo=bar'")
+
+ result = get_named_cookie(session, "foo")
+ cookie = assert_success(result)
+ assert isinstance(cookie, dict)
+
+ # table for cookie conversion
+ # https://w3c.github.io/webdriver/#dfn-table-for-cookie-conversion
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+ assert "path" in cookie
+ assert isinstance(cookie["path"], str)
+ assert "domain" in cookie
+ assert isinstance(cookie["domain"], str)
+ assert "secure" in cookie
+ assert isinstance(cookie["secure"], bool)
+ assert "httpOnly" in cookie
+ assert isinstance(cookie["httpOnly"], bool)
+ if "expiry" in cookie:
+ assert cookie.get("expiry") is None
+ assert "sameSite" in cookie
+ assert isinstance(cookie["sameSite"], str)
+
+ assert cookie["name"] == "foo"
+ assert cookie["value"] == "bar"
+
+
+def test_get_named_cookie(session, url):
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ # same formatting as Date.toUTCString() in javascript
+ utc_string_format = "%a, %d %b %Y %H:%M:%S"
+ a_day_from_now = (datetime.utcnow() + timedelta(days=1)).strftime(utc_string_format)
+ session.execute_script("document.cookie = 'foo=bar;expires=%s'" % a_day_from_now)
+
+ result = get_named_cookie(session, "foo")
+ cookie = assert_success(result)
+ assert isinstance(cookie, dict)
+
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+ assert "expiry" in cookie
+ assert isinstance(cookie["expiry"], int)
+ assert "sameSite" in cookie
+ assert isinstance(cookie["sameSite"], str)
+
+ assert cookie["name"] == "foo"
+ assert cookie["value"] == "bar"
+ # convert from seconds since epoch
+ assert datetime.utcfromtimestamp(
+ cookie["expiry"]).strftime(utc_string_format) == a_day_from_now
+
+
+def test_duplicated_cookie(session, url, server_config, inline):
+ new_cookie = {
+ "name": "hello",
+ "value": "world",
+ "domain": server_config["browser_host"],
+ "path": "/",
+ "http_only": False,
+ "secure": False
+ }
+
+ session.url = url("/common/blank.html")
+ clear_all_cookies(session)
+
+ session.set_cookie(**new_cookie)
+ session.url = inline("""
+ <script>
+ document.cookie = '{name}=newworld; domain={domain}; path=/';
+ </script>""".format(
+ name=new_cookie["name"],
+ domain=server_config["browser_host"]))
+
+ result = get_named_cookie(session, new_cookie["name"])
+ cookie = assert_success(result)
+ assert isinstance(cookie, dict)
+
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+ assert "sameSite" in cookie
+ assert isinstance(cookie["sameSite"], str)
+
+ assert cookie["name"] == new_cookie["name"]
+ assert cookie["value"] == "newworld"
+
+
+@pytest.mark.parametrize("same_site", ["None", "Lax", "Strict"])
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+def test_get_cookie_with_same_site_flag(session, url, same_site):
+ session.url = url("/common/blank.html", protocol="https")
+ clear_all_cookies(session)
+
+ session.execute_script("document.cookie = 'foo=bar;Secure;SameSite=%s'" % same_site)
+
+ result = get_named_cookie(session, "foo")
+ cookie = assert_success(result)
+ assert isinstance(cookie, dict)
+
+ assert "name" in cookie
+ assert isinstance(cookie["name"], str)
+ assert "value" in cookie
+ assert isinstance(cookie["value"], str)
+ assert "sameSite" in cookie
+ assert isinstance(cookie["sameSite"], str)
+
+ assert cookie["name"] == "foo"
+ assert cookie["value"] == "bar"
+ assert cookie["sameSite"] == same_site
diff --git a/testing/web-platform/tests/webdriver/tests/get_named_cookie/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_named_cookie/user_prompts.py
new file mode 100644
index 0000000000..3ef52a58a4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_named_cookie/user_prompts.py
@@ -0,0 +1,118 @@
+# META: timeout=long
+
+import pytest
+
+from webdriver.error import NoSuchCookieException
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def get_named_cookie(session, name):
+ return session.transport.send(
+ "GET", "session/{session_id}/cookie/{name}".format(
+ session_id=session.session_id,
+ name=name))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, create_cookie):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ create_cookie("foo", value="bar", path="/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_named_cookie(session, "foo")
+ cookie = assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert cookie["name"] == "foo"
+ assert cookie["value"] == "bar"
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, create_cookie):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ create_cookie("foo", value="bar", path="/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_named_cookie(session, "foo")
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, create_cookie):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ create_cookie("foo", value="bar", path="/common/blank.html")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_named_cookie(session, "foo")
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_page_source/__init__.py b/testing/web-platform/tests/webdriver/tests/get_page_source/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_page_source/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_page_source/source.py b/testing/web-platform/tests/webdriver/tests/get_page_source/source.py
new file mode 100644
index 0000000000..cc4e208835
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_page_source/source.py
@@ -0,0 +1,25 @@
+from tests.support.asserts import assert_error, assert_success
+
+
+def get_page_source(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/source".format(**vars(session)))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = get_page_source(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = get_page_source(session)
+ assert_error(response, "no such window")
+
+
+def test_source_matches_outer_html(session, inline):
+ session.url = inline("<html><head><title>Cheese</title><body>Peas")
+
+ expected = session.execute_script("return document.documentElement.outerHTML")
+
+ response = get_page_source(session)
+ assert_success(response, expected)
diff --git a/testing/web-platform/tests/webdriver/tests/get_page_source/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_page_source/user_prompts.py
new file mode 100644
index 0000000000..13cb31595e
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_page_source/user_prompts.py
@@ -0,0 +1,112 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def get_page_source(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/source".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<div/>")
+ expected = session.execute_script("return document.documentElement.outerHTML")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_page_source(session)
+ assert_success(response, expected)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<div/>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_page_source(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<div/>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_page_source(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/get_timeouts/__init__.py b/testing/web-platform/tests/webdriver/tests/get_timeouts/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_timeouts/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_timeouts/get.py b/testing/web-platform/tests/webdriver/tests/get_timeouts/get.py
new file mode 100644
index 0000000000..9601c00d90
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_timeouts/get.py
@@ -0,0 +1,43 @@
+from tests.support.asserts import assert_success
+
+
+def get_timeouts(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/timeouts".format(**vars(session)))
+
+
+def test_get_timeouts(session):
+ response = get_timeouts(session)
+
+ assert_success(response)
+ assert "value" in response.body
+ assert isinstance(response.body["value"], dict)
+
+ value = response.body["value"]
+ assert "script" in value
+ assert "implicit" in value
+ assert "pageLoad" in value
+
+ assert isinstance(value["script"], int)
+ assert isinstance(value["implicit"], int)
+ assert isinstance(value["pageLoad"], int)
+
+
+def test_get_default_timeouts(session):
+ response = get_timeouts(session)
+
+ assert_success(response)
+ assert response.body["value"]["script"] == 30000
+ assert response.body["value"]["implicit"] == 0
+ assert response.body["value"]["pageLoad"] == 300000
+
+
+def test_get_new_timeouts(session):
+ session.timeouts.script = 60
+ session.timeouts.implicit = 1
+ session.timeouts.page_load = 200
+ response = get_timeouts(session)
+ assert_success(response)
+ assert response.body["value"]["script"] == 60000
+ assert response.body["value"]["implicit"] == 1000
+ assert response.body["value"]["pageLoad"] == 200000
diff --git a/testing/web-platform/tests/webdriver/tests/get_title/__init__.py b/testing/web-platform/tests/webdriver/tests/get_title/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_title/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_title/get.py b/testing/web-platform/tests/webdriver/tests/get_title/get.py
new file mode 100644
index 0000000000..e696ec3403
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_title/get.py
@@ -0,0 +1,56 @@
+from tests.support.asserts import assert_error, assert_success
+
+
+def get_title(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/title".format(**vars(session)))
+
+
+def test_payload(session):
+ session.start()
+
+ response = get_title(session)
+ value = assert_success(response)
+ assert isinstance(value, str)
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = get_title(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame, inline):
+ session.url = inline("<title>Foo</title>")
+
+ response = get_title(session)
+ assert_success(response, "Foo")
+
+
+def test_with_duplicated_title(session, inline):
+ session.url = inline("<title>First</title><title>Second</title>")
+
+ result = get_title(session)
+ assert_success(result, "First")
+
+
+def test_without_title(session, inline):
+ session.url = inline("<h2>Hello</h2>")
+
+ result = get_title(session)
+ assert_success(result, "")
+
+
+def test_after_modification(session, inline):
+ session.url = inline("<title>Initial</title><h2>Hello</h2>")
+ session.execute_script("document.title = 'Updated'")
+
+ result = get_title(session)
+ assert_success(result, "Updated")
+
+
+def test_strip_and_collapse(session, inline):
+ document = "<title> a b\tc\nd\t \n e\t\n </title><h2>Hello</h2>"
+ session.url = inline(document)
+
+ result = get_title(session)
+ assert_success(result, "a b c d e")
diff --git a/testing/web-platform/tests/webdriver/tests/get_title/iframe.py b/testing/web-platform/tests/webdriver/tests/get_title/iframe.py
new file mode 100644
index 0000000000..9c5ab0b595
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_title/iframe.py
@@ -0,0 +1,80 @@
+import pytest
+
+from tests.support.asserts import assert_success
+
+
+"""
+Tests that WebDriver can transcend site origins.
+
+Many modern browsers impose strict cross-origin checks,
+and WebDriver should be able to transcend these.
+
+Although an implementation detail, certain browsers
+also enforce process isolation based on site origin.
+This is known to sometimes cause problems for WebDriver implementations.
+"""
+
+
+@pytest.fixture
+def frame_doc(inline):
+ return inline("<title>cheese</title><p>frame")
+
+
+@pytest.fixture
+def one_frame_doc(inline, frame_doc):
+ return inline("<title>bar</title><iframe src='%s'></iframe>" % frame_doc)
+
+
+@pytest.fixture
+def nested_frames_doc(inline, one_frame_doc):
+ return inline("<title>foo</title><iframe src='%s'></iframe>" % one_frame_doc)
+
+
+def get_title(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/title".format(**vars(session)))
+
+
+def test_no_iframe(session, inline):
+ session.url = inline("<title>Foobar</title><h2>Hello</h2>")
+
+ result = get_title(session)
+ assert_success(result, "Foobar")
+
+
+def test_iframe(session, one_frame_doc):
+ session.url = one_frame_doc
+
+ frame = session.find.css("iframe", all=False)
+ session.switch_frame(frame)
+ session.find.css("p", all=False)
+
+ response = get_title(session)
+ assert_success(response, "bar")
+
+
+def test_nested_iframe(session, nested_frames_doc):
+ session.url = nested_frames_doc
+
+ outer_frame = session.find.css("iframe", all=False)
+ session.switch_frame(outer_frame)
+
+ inner_frame = session.find.css("iframe", all=False)
+ session.switch_frame(inner_frame)
+ session.find.css("p", all=False)
+
+ response = get_title(session)
+ assert_success(response, "foo")
+
+
+@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"])
+def test_origin(session, inline, iframe, domain):
+ session.url = inline("<title>foo</title>{}".format(
+ iframe("<title>bar</title><p>frame", domain=domain)))
+
+ frame = session.find.css("iframe", all=False)
+ session.switch_frame(frame)
+ session.find.css("p", all=False)
+
+ response = get_title(session)
+ assert_success(response, "foo")
diff --git a/testing/web-platform/tests/webdriver/tests/get_title/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_title/user_prompts.py
new file mode 100644
index 0000000000..0fd51e46f3
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_title/user_prompts.py
@@ -0,0 +1,134 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def get_title(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/title".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<title>Foo</title>")
+ expected_title = session.title
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_title(session)
+ assert_success(response, expected_title)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<title>Foo</title>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_title(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<title>Foo</title>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_title(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+# The behavior of the `window.print` function is platform-dependent and may not
+# trigger the creation of a dialog at all. Therefore, this test should only be
+# run in contexts that support the dialog (a condition that may not be
+# determined automatically).
+# def test_title_with_non_simple_dialog(session, inline):
+# document = "<title>With non-simple dialog</title><h2>Hello</h2>"
+# spawn = """
+# var done = arguments[0];
+# setTimeout(function() {
+# done();
+# }, 0);
+# setTimeout(function() {
+# window['print']();
+# }, 0);
+# """
+# session.url = inline(document)
+# session.execute_async_script(spawn)
+#
+# result = get_title(session)
+# assert_error(result, "unexpected alert open")
diff --git a/testing/web-platform/tests/webdriver/tests/get_window_handle/__init__.py b/testing/web-platform/tests/webdriver/tests/get_window_handle/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_window_handle/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_window_handle/get.py b/testing/web-platform/tests/webdriver/tests/get_window_handle/get.py
new file mode 100644
index 0000000000..68441da5ef
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_window_handle/get.py
@@ -0,0 +1,43 @@
+import pytest
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def get_window_handle(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/window".format(**vars(session)))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = get_window_handle(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = get_window_handle(session)
+ assert_success(response, session.window_handle)
+
+
+def test_basic(session):
+ response = get_window_handle(session)
+ assert_success(response, session.window_handle)
+
+
+# Capability needed as long as no valid certificate is available:
+# https://github.com/web-platform-tests/wpt/issues/28847
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+def test_navigation_with_coop_headers(session, url):
+ base_path = ("/webdriver/tests/support/html/subframe.html" +
+                 "?pipe=header(Cross-Origin-Opener-Policy,same-origin)")
+
+ session.url = url(base_path, protocol="https")
+ response = get_window_handle(session)
+ first_handle = assert_success(response)
+
+ # navigating to another domain with COOP headers will force a process change
+ # in most browsers
+ session.url = url(base_path, protocol="https", domain="alt")
+ response = get_window_handle(session)
+ second_handle = assert_success(response)
+
+ assert first_handle == second_handle
diff --git a/testing/web-platform/tests/webdriver/tests/get_window_handle/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_window_handle/user_prompts.py
new file mode 100644
index 0000000000..0bd660cfa1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_window_handle/user_prompts.py
@@ -0,0 +1,61 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_success
+
+
+def get_window_handle(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/window".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_without_exception(session, create_dialog):
+    def check_user_prompt_not_closed_without_exception(dialog_type):
+        window_handle = session.window_handle
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_window_handle(session)
+        assert_success(response, window_handle)
+
+        assert session.alert.text == dialog_type
+        session.alert.dismiss()
+
+    return check_user_prompt_not_closed_without_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept_and_notify(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss_and_notify(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_default(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
diff --git a/testing/web-platform/tests/webdriver/tests/get_window_handles/__init__.py b/testing/web-platform/tests/webdriver/tests/get_window_handles/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_window_handles/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_window_handles/get.py b/testing/web-platform/tests/webdriver/tests/get_window_handles/get.py
new file mode 100644
index 0000000000..8f4361e30c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_window_handles/get.py
@@ -0,0 +1,37 @@
+from tests.support.asserts import assert_success
+
+
+def get_window_handles(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/window/handles".format(**vars(session)))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = get_window_handles(session)
+ assert_success(response, session.handles)
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = get_window_handles(session)
+ assert_success(response, session.handles)
+
+
+def test_single_window(session):
+ response = get_window_handles(session)
+ value = assert_success(response)
+
+ assert len(value) == 1
+ assert value == session.handles
+ assert value[0] == session.window_handle
+
+
+def test_multiple_windows(session):
+ original_handle = session.window_handle
+ new_handle = session.new_window()
+
+ response = get_window_handles(session)
+ value = assert_success(response)
+
+ assert len(value) == 2
+ assert original_handle in value
+ assert new_handle in value
diff --git a/testing/web-platform/tests/webdriver/tests/get_window_handles/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_window_handles/user_prompts.py
new file mode 100644
index 0000000000..217e9849b4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_window_handles/user_prompts.py
@@ -0,0 +1,61 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_success
+
+
+def get_window_handles(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/window/handles".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_without_exception(session, create_dialog):
+    def check_user_prompt_not_closed_without_exception(dialog_type):
+        window_handles = session.handles
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_window_handles(session)
+        assert_success(response, window_handles)
+
+        assert session.alert.text == dialog_type
+        session.alert.dismiss()
+
+    return check_user_prompt_not_closed_without_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept_and_notify(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss_and_notify(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_default(check_user_prompt_not_closed_without_exception, dialog_type):
+    check_user_prompt_not_closed_without_exception(dialog_type)
diff --git a/testing/web-platform/tests/webdriver/tests/get_window_rect/__init__.py b/testing/web-platform/tests/webdriver/tests/get_window_rect/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_window_rect/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/get_window_rect/get.py b/testing/web-platform/tests/webdriver/tests/get_window_rect/get.py
new file mode 100644
index 0000000000..f7592a30e0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_window_rect/get.py
@@ -0,0 +1,31 @@
+from tests.support.asserts import assert_error, assert_success
+
+
+def get_window_rect(session):
+ return session.transport.send(
+ "GET", "session/{session_id}/window/rect".format(**vars(session)))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = get_window_rect(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = get_window_rect(session)
+ assert_success(response)
+
+
+def test_payload(session):
+ expected = session.execute_script("""return {
+ x: window.screenX,
+ y: window.screenY,
+ width: window.outerWidth,
+ height: window.outerHeight
+ }""")
+
+ response = get_window_rect(session)
+ value = assert_success(response)
+
+ assert isinstance(value, dict)
+ assert value == expected
diff --git a/testing/web-platform/tests/webdriver/tests/get_window_rect/user_prompts.py b/testing/web-platform/tests/webdriver/tests/get_window_rect/user_prompts.py
new file mode 100644
index 0000000000..37c8da6bd3
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/get_window_rect/user_prompts.py
@@ -0,0 +1,111 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def get_window_rect(session):
+    return session.transport.send(
+        "GET", "session/{session_id}/window/rect".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog):
+    def check_user_prompt_closed_without_exception(dialog_type, retval):
+        original_rect = session.window.rect
+
+        create_dialog(dialog_type, text=dialog_type)
+
+        response = get_window_rect(session)
+        assert_success(response, original_rect)
+
+        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+    return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ original_rect = session.window.rect
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_window_rect(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.window.rect == original_rect
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = get_window_rect(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/idlharness.window.js b/testing/web-platform/tests/webdriver/tests/idlharness.window.js
new file mode 100644
index 0000000000..e92e151d89
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/idlharness.window.js
@@ -0,0 +1,16 @@
+// META: script=/resources/WebIDLParser.js
+// META: script=/resources/idlharness.js
+
+// https://w3c.github.io/webdriver/
+
+"use strict";
+
+idl_test(
+ ["webdriver"],
+ ["html"],
+ idl_array => {
+ idl_array.add_objects({
+ Navigator: ["navigator"]
+ });
+ }
+);
diff --git a/testing/web-platform/tests/webdriver/tests/interface/interface.py b/testing/web-platform/tests/webdriver/tests/interface/interface.py
new file mode 100644
index 0000000000..6a7afcd263
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/interface/interface.py
@@ -0,0 +1,2 @@
+def test_navigator_webdriver_active(session):
+ assert session.execute_script("return navigator.webdriver") is True
diff --git a/testing/web-platform/tests/webdriver/tests/is_element_enabled/__init__.py b/testing/web-platform/tests/webdriver/tests/is_element_enabled/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/is_element_enabled/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/is_element_enabled/enabled.py b/testing/web-platform/tests/webdriver/tests/is_element_enabled/enabled.py
new file mode 100644
index 0000000000..ef9106cce8
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/is_element_enabled/enabled.py
@@ -0,0 +1,161 @@
+import pytest
+
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def is_element_enabled(session, element_id):
+ return session.transport.send(
+ "GET",
+ "session/{session_id}/element/{element_id}/enabled".format(
+ session_id=session.session_id,
+ element_id=element_id
+ )
+ )
+
+
+def test_no_top_browsing_context(session, closed_window):
+ original_handle, element = closed_window
+ response = is_element_enabled(session, element.id)
+ assert_error(response, "no such window")
+ response = is_element_enabled(session, "foo")
+ assert_error(response, "no such window")
+
+ session.window_handle = original_handle
+ response = is_element_enabled(session, element.id)
+ assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = is_element_enabled(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+ element = Element("foo", session)
+
+ response = is_element_enabled(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ response = is_element_enabled(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ response = is_element_enabled(session, button.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<input>", "input", as_frame=as_frame)
+
+ result = is_element_enabled(session, element.id)
+ assert_error(result, "stale element reference")
+
+
+@pytest.mark.parametrize("element", ["button", "input", "select", "textarea"])
+def test_form_control_disabled(session, inline, element):
+ session.url = inline("<{} disabled/>".format(element))
+ element = session.find.css(element, all=False)
+
+ result = is_element_enabled(session, element.id)
+ assert_success(result, False)
+
+
+@pytest.mark.parametrize("element", ["button", "input", "select", "textarea"])
+def test_form_control_enabled(session, inline, element):
+ session.url = inline("<{}/>".format(element))
+ element = session.find.css(element, all=False)
+
+ result = is_element_enabled(session, element.id)
+ assert_success(result, True)
+
+
+@pytest.mark.parametrize("element", ["button", "input", "select", "textarea"])
+def test_fieldset_disabled_descendant(session, inline, element):
+ session.url = inline("<fieldset disabled><{}/></fieldset>".format(element))
+ element = session.find.css(element, all=False)
+
+ result = is_element_enabled(session, element.id)
+ assert_success(result, False)
+
+
+@pytest.mark.parametrize("element", ["button", "input", "select", "textarea"])
+def test_fieldset_enabled_descendant(session, inline, element):
+ session.url = inline("<fieldset><{}/></fieldset>".format(element))
+ element = session.find.css(element, all=False)
+
+ result = is_element_enabled(session, element.id)
+ assert_success(result, True)
+
+
+@pytest.mark.parametrize("element", ["button", "input", "select", "textarea"])
+def test_fieldset_disabled_descendant_legend(session, inline, element):
+ session.url = inline("<fieldset disabled><legend><{}/></legend></fieldset>".format(element))
+ element = session.find.css(element, all=False)
+
+ result = is_element_enabled(session, element.id)
+ assert_success(result, True)
+
+
+@pytest.mark.parametrize("element", ["button", "input", "select", "textarea"])
+def test_fieldset_enabled_descendant_legend(session, inline, element):
+ session.url = inline("<fieldset><legend><{}/></legend></fieldset>".format(element))
+ element = session.find.css(element, all=False)
+
+ result = is_element_enabled(session, element.id)
+ assert_success(result, True)
+
+
+@pytest.mark.parametrize("element", ["button", "input", "select", "textarea"])
+def test_xhtml_form_control_disabled(session, inline, element):
+ session.url = inline("""<{} disabled="disabled"/>""".format(element),
+ doctype="xhtml")
+ element = session.find.css(element, all=False)
+
+ result = is_element_enabled(session, element.id)
+ assert_success(result, False)
+
+
+@pytest.mark.parametrize("element", ["button", "input", "select", "textarea"])
+def test_xhtml_form_control_enabled(session, inline, element):
+ session.url = inline("""<{}/>""".format(element), doctype="xhtml")
+ element = session.find.css(element, all=False)
+
+ result = is_element_enabled(session, element.id)
+ assert_success(result, True)
+
+
+def test_xml_always_not_enabled(session, inline):
+ session.url = inline("""<note></note>""", doctype="xml")
+ element = session.find.css("note", all=False)
+
+ result = is_element_enabled(session, element.id)
+ assert_success(result, False)
diff --git a/testing/web-platform/tests/webdriver/tests/is_element_enabled/user_prompts.py b/testing/web-platform/tests/webdriver/tests/is_element_enabled/user_prompts.py
new file mode 100644
index 0000000000..5dd7d582bd
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/is_element_enabled/user_prompts.py
@@ -0,0 +1,119 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_dialog_handled, assert_success
+
+
+def is_element_enabled(session, element_id):
+ return session.transport.send(
+ "GET",
+ "session/{session_id}/element/{element_id}/enabled".format(
+ session_id=session.session_id,
+ element_id=element_id
+ )
+ )
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<input id=foo disabled>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = is_element_enabled(session, element.id)
+ assert_success(response, False)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<input id=foo disabled>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = is_element_enabled(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<input id=foo disabled>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = is_element_enabled(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/is_element_selected/__init__.py b/testing/web-platform/tests/webdriver/tests/is_element_selected/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/is_element_selected/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/is_element_selected/selected.py b/testing/web-platform/tests/webdriver/tests/is_element_selected/selected.py
new file mode 100644
index 0000000000..a57737590c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/is_element_selected/selected.py
@@ -0,0 +1,128 @@
+import pytest
+
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_success
+
+
+@pytest.fixture
+def check_doc():
+ return """
+ <input id=checked type=checkbox checked>
+ <input id=notChecked type=checkbox>
+ """
+
+
+@pytest.fixture
+def option_doc():
+ return """
+ <select>
+ <option id=notSelected>r-
+ <option id=selected selected>r+
+ </select>
+ """
+
+
+def is_element_selected(session, element_id):
+ return session.transport.send(
+ "GET", "session/{session_id}/element/{element_id}/selected".format(
+ session_id=session.session_id,
+ element_id=element_id))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ original_handle, element = closed_window
+
+ response = is_element_selected(session, element.id)
+ assert_error(response, "no such window")
+ response = is_element_selected(session, "foo")
+ assert_error(response, "no such window")
+
+ session.window_handle = original_handle
+ response = is_element_selected(session, element.id)
+ assert_error(response, "no such element")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = is_element_selected(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_such_element_with_invalid_value(session):
+ element = Element("foo", session)
+
+ response = is_element_selected(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ response = is_element_selected(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ response = is_element_selected(session, button.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, check_doc, as_frame):
+ element = stale_element(check_doc, "#checked", as_frame=as_frame)
+
+ result = is_element_selected(session, element.id)
+ assert_error(result, "stale element reference")
+
+
+def test_element_checked(session, inline, check_doc):
+ session.url = inline(check_doc)
+ element = session.find.css("#checked", all=False)
+
+ result = is_element_selected(session, element.id)
+ assert_success(result, True)
+
+
+def test_checkbox_not_selected(session, inline, check_doc):
+ session.url = inline(check_doc)
+ element = session.find.css("#notChecked", all=False)
+
+ result = is_element_selected(session, element.id)
+ assert_success(result, False)
+
+
+def test_element_selected(session, inline, option_doc):
+ session.url = inline(option_doc)
+ element = session.find.css("#selected", all=False)
+
+ result = is_element_selected(session, element.id)
+ assert_success(result, True)
+
+
+def test_element_not_selected(session, inline, option_doc):
+ session.url = inline(option_doc)
+ element = session.find.css("#notSelected", all=False)
+
+ result = is_element_selected(session, element.id)
+ assert_success(result, False)
diff --git a/testing/web-platform/tests/webdriver/tests/is_element_selected/user_prompts.py b/testing/web-platform/tests/webdriver/tests/is_element_selected/user_prompts.py
new file mode 100644
index 0000000000..96da2c08bd
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/is_element_selected/user_prompts.py
@@ -0,0 +1,117 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_dialog_handled, assert_success
+
+
+def is_element_selected(session, element_id):
+ return session.transport.send(
+ "GET", "session/{session_id}/element/{element_id}/selected".format(
+ session_id=session.session_id,
+ element_id=element_id))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<input id=foo type=checkbox checked>")
+ element = session.find.css("#foo", all=False)
+ element.send_keys("foo")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = is_element_selected(session, element.id)
+ assert_success(response, True)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<input id=foo type=checkbox checked>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = is_element_selected(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<input id=foo type=checkbox checked>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = is_element_selected(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/maximize_window/__init__.py b/testing/web-platform/tests/webdriver/tests/maximize_window/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/maximize_window/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/maximize_window/maximize.py b/testing/web-platform/tests/webdriver/tests/maximize_window/maximize.py
new file mode 100644
index 0000000000..e233e45a10
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/maximize_window/maximize.py
@@ -0,0 +1,100 @@
+# META: timeout=long
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import document_hidden, is_fullscreen
+
+
+def maximize(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/window/maximize".format(**vars(session)))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = maximize(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = maximize(session)
+ assert_success(response)
+
+
+def test_fully_exit_fullscreen(session):
+ session.window.fullscreen()
+ assert is_fullscreen(session)
+
+ response = maximize(session)
+ assert_success(response)
+ assert not is_fullscreen(session)
+
+
+def test_restore_the_window(session):
+ session.window.minimize()
+ assert document_hidden(session)
+
+ response = maximize(session)
+ assert_success(response)
+
+
+def test_maximize(session):
+ before_size = session.window.size
+
+ response = maximize(session)
+ assert_success(response)
+
+ assert before_size != session.window.size
+
+
+def test_payload(session):
+ before_size = session.window.size
+
+ response = maximize(session)
+
+ assert response.status == 200
+ assert isinstance(response.body["value"], dict)
+
+ value = response.body["value"]
+ assert "width" in value
+ assert "height" in value
+ assert "x" in value
+ assert "y" in value
+ assert isinstance(value["width"], int)
+ assert isinstance(value["height"], int)
+ assert isinstance(value["x"], int)
+ assert isinstance(value["y"], int)
+
+ assert before_size != session.window.size
+
+
+def test_maximize_twice_is_idempotent(session):
+ first_response = maximize(session)
+ assert_success(first_response)
+ max_size = session.window.size
+
+ second_response = maximize(session)
+ assert_success(second_response)
+ assert session.window.size == max_size
+
+
+def test_maximize_when_resized_to_max_size(session):
+ # Determine the largest available window size by first maximising
+ # the window and getting the window rect dimensions.
+ #
+ # Then resize the window to the maximum available size.
+ session.end()
+ session.window.maximize()
+ available = session.window.size
+ session.window.size = (800, 600)
+ session.end()
+
+ session.window.size = available
+
+ # In certain window managers a window extending to the full available
+ # dimensions of the screen may not imply that the window is maximised,
+ # since this is often a special state. If a remote end expects a DOM
+ # resize event, this may not fire if the window has already reached
+ # its expected dimensions.
+ before = session.window.size
+ session.window.maximize()
+ after = session.window.size
+ assert after == before
diff --git a/testing/web-platform/tests/webdriver/tests/maximize_window/stress.py b/testing/web-platform/tests/webdriver/tests/maximize_window/stress.py
new file mode 100644
index 0000000000..4527c64a28
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/maximize_window/stress.py
@@ -0,0 +1,43 @@
+# META: timeout=long
+
+import time
+
+import pytest
+
+from tests.support.asserts import assert_success
+from tests.support.helpers import document_hidden
+
+
+def maximize_window(session):
+ response = session.transport.send(
+ "POST", "session/{session_id}/window/maximize".format(**vars(session)))
+ rect = assert_success(response)
+ return (rect["width"], rect["height"])
+
+
+@pytest.mark.parametrize("i", range(5))
+def test_stress(session, i):
+ """
+ Without defining the heuristics of each platform WebDriver runs on,
+ the best we can do is to test that maximization occurs synchronously.
+
+ Not all systems and window managers support maximizing the window,
+ but they are expected to do their best. The minimum requirement
+ is that the maximized window is larger than its original size.
+
+ To ensure the maximization happened synchronously, we test
+ that the size hasn't changed after a short amount of time,
+ using a thread suspend. This is not ideal, but the best we
+ can do given the level of platform ambiguity implied by WebDriver.
+ """
+ session.window.size = (100, 100)
+ session.window.position = (0, 0)
+ original_size = session.window.size
+
+ size_after_maximize = maximize_window(session)
+ assert size_after_maximize > original_size
+
+ t_end = time.time() + 3
+ while time.time() < t_end:
+ assert session.window.size == size_after_maximize
+ time.sleep(.1)
diff --git a/testing/web-platform/tests/webdriver/tests/maximize_window/user_prompts.py b/testing/web-platform/tests/webdriver/tests/maximize_window/user_prompts.py
new file mode 100644
index 0000000000..032edc893a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/maximize_window/user_prompts.py
@@ -0,0 +1,117 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def maximize(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/window/maximize".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ original_size = session.window.size
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = maximize(session)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.window.size != original_size
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ original_size = session.window.size
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = maximize(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.window.size == original_size
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ original_size = session.window.size
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = maximize(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.window.size == original_size
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/minimize_window/__init__.py b/testing/web-platform/tests/webdriver/tests/minimize_window/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/minimize_window/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/minimize_window/minimize.py b/testing/web-platform/tests/webdriver/tests/minimize_window/minimize.py
new file mode 100644
index 0000000000..616b250c70
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/minimize_window/minimize.py
@@ -0,0 +1,69 @@
+# META: timeout=long
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import document_hidden, is_fullscreen
+
+
+def minimize(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/window/minimize".format(**vars(session)))
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = minimize(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = minimize(session)
+ assert_success(response)
+
+
+def test_fully_exit_fullscreen(session):
+ session.window.fullscreen()
+ assert is_fullscreen(session)
+
+ response = minimize(session)
+ assert_success(response)
+ assert not is_fullscreen(session)
+ assert document_hidden(session)
+
+
+def test_minimize(session):
+ assert not document_hidden(session)
+
+ response = minimize(session)
+ assert_success(response)
+ assert document_hidden(session)
+
+
+def test_payload(session):
+ assert not document_hidden(session)
+
+ response = minimize(session)
+ value = assert_success(response)
+ assert isinstance(value, dict)
+
+ value = response.body["value"]
+ assert "width" in value
+ assert "height" in value
+ assert "x" in value
+ assert "y" in value
+ assert isinstance(value["width"], int)
+ assert isinstance(value["height"], int)
+ assert isinstance(value["x"], int)
+ assert isinstance(value["y"], int)
+
+ assert document_hidden(session)
+
+
+def test_minimize_twice_is_idempotent(session):
+ assert not document_hidden(session)
+
+ first_response = minimize(session)
+ assert_success(first_response)
+ assert document_hidden(session)
+
+ second_response = minimize(session)
+ assert_success(second_response)
+ assert document_hidden(session)
diff --git a/testing/web-platform/tests/webdriver/tests/minimize_window/stress.py b/testing/web-platform/tests/webdriver/tests/minimize_window/stress.py
new file mode 100644
index 0000000000..8990f12669
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/minimize_window/stress.py
@@ -0,0 +1,19 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_success
+from tests.support.helpers import document_hidden
+
+
+def minimize_window(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/window/minimize".format(**vars(session)))
+
+
+@pytest.mark.parametrize("i", range(5))
+def test_stress(session, i):
+ assert not document_hidden(session)
+ response = minimize_window(session)
+ assert_success(response)
+ assert document_hidden(session)
diff --git a/testing/web-platform/tests/webdriver/tests/minimize_window/user_prompts.py b/testing/web-platform/tests/webdriver/tests/minimize_window/user_prompts.py
new file mode 100644
index 0000000000..19059b3c39
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/minimize_window/user_prompts.py
@@ -0,0 +1,113 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+from tests.support.helpers import document_hidden
+
+
+def minimize(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/window/minimize".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ assert not document_hidden(session)
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = minimize(session)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+ assert document_hidden(session)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ assert not document_hidden(session)
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = minimize(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+ assert not document_hidden(session)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ assert not document_hidden(session)
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = minimize(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert not document_hidden(session)
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/navigate_to/__init__.py b/testing/web-platform/tests/webdriver/tests/navigate_to/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/navigate_to/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/navigate_to/file.py b/testing/web-platform/tests/webdriver/tests/navigate_to/file.py
new file mode 100644
index 0000000000..5dae5f5c4d
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/navigate_to/file.py
@@ -0,0 +1,25 @@
+from tests.support import platform_name
+from tests.support.asserts import assert_success
+
+
+def navigate_to(session, url):
+ return session.transport.send(
+ "POST", "session/{session_id}/url".format(**vars(session)),
+ {"url": url})
+
+
+def test_file_protocol(session, server_config):
+    # tests that the browsing context remains the same
+    # when navigating to privileged documents
+ path = server_config["doc_root"]
+ if platform_name == "windows":
+        # Convert the path into the format e.g. /c:/foo/bar
+ path = "/{}".format(path.replace("\\", "/"))
+ url = u"file://{}".format(path)
+
+ response = navigate_to(session, url)
+ assert_success(response)
+
+ if session.url.endswith('/'):
+ url += '/'
+ assert session.url == url
diff --git a/testing/web-platform/tests/webdriver/tests/navigate_to/navigate.py b/testing/web-platform/tests/webdriver/tests/navigate_to/navigate.py
new file mode 100644
index 0000000000..d61377af27
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/navigate_to/navigate.py
@@ -0,0 +1,100 @@
+import time
+
+import pytest
+from webdriver import error
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def navigate_to(session, url):
+ return session.transport.send(
+ "POST", "session/{session_id}/url".format(**vars(session)),
+ {"url": url})
+
+
+def test_null_parameter_value(session, http):
+ path = "/session/{session_id}/url".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_null_response_value(session, inline):
+ response = navigate_to(session, inline("<div/>"))
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = navigate_to(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame, inline):
+ doc = inline("<p>foo")
+
+ response = navigate_to(session, doc)
+ assert_success(response)
+
+ assert session.url == doc
+
+
+def test_basic(session, inline):
+ url = inline("<div id=foo>")
+
+ session.url = inline("<div id=bar>")
+ element = session.find.css("#bar", all=False)
+
+ response = navigate_to(session, url)
+ assert_success(response)
+
+ with pytest.raises(error.StaleElementReferenceException):
+ element.property("id")
+
+ assert session.url == url
+ assert session.find.css("#foo", all=False)
+
+
+# Capability needed as long as no valid certificate is available:
+# https://github.com/web-platform-tests/wpt/issues/28847
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+def test_cross_origin(session, inline, url):
+ base_path = ("/webdriver/tests/support/html/subframe.html" +
+ "?pipe=header(Cross-Origin-Opener-Policy,same-origin")
+ first_page = url(base_path, protocol="https")
+ second_page = url(base_path, protocol="https", domain="alt")
+
+ response = navigate_to(session, first_page)
+ assert_success(response)
+
+ assert session.url == first_page
+ elem = session.find.css("#delete", all=False)
+
+ response = navigate_to(session, second_page)
+ assert_success(response)
+
+ assert session.url == second_page
+ with pytest.raises(error.NoSuchElementException):
+ elem.click()
+
+ session.find.css("#delete", all=False)
+
+
+@pytest.mark.capabilities({"pageLoadStrategy": "eager"})
+def test_utf8_meta_tag_after_1024_bytes(session, url):
+ page = url("/webdriver/tests/support/html/meta-utf8-after-1024-bytes.html")
+
+    # Loading the page will cause a real parse to commence, and a
+    # renavigation to the same URL to be triggered subsequently. Test that
+    # the navigate command waits long enough.
+ response = navigate_to(session, page)
+ assert_success(response)
+
+ # If the command returns too early the property will be reset due to the
+ # subsequent page load.
+ session.execute_script("window.foo = 'bar'")
+
+ # Use delay to allow a possible missing subsequent navigation to start
+ time.sleep(1)
+
+ assert session.execute_script("return window.foo") == "bar"
diff --git a/testing/web-platform/tests/webdriver/tests/navigate_to/user_prompts.py b/testing/web-platform/tests/webdriver/tests/navigate_to/user_prompts.py
new file mode 100644
index 0000000000..682bc40f4f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/navigate_to/user_prompts.py
@@ -0,0 +1,112 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
+
+
+def navigate_to(session, url):
+ return session.transport.send(
+ "POST", "session/{session_id}/url".format(**vars(session)),
+ {"url": url})
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ url = inline("<div/>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = navigate_to(session, url)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.url == url
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ url = inline("<div/>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = navigate_to(session, url)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert session.url != url
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ url = inline("<div/>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = navigate_to(session, url)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.url != url
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type):
+ # retval not testable for confirm and prompt because window is gone
+ check_user_prompt_closed_without_exception(dialog_type, None)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
+ # retval not testable for confirm and prompt because window is gone
+ check_user_prompt_closed_without_exception(dialog_type, None)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/__init__.py b/testing/web-platform/tests/webdriver/tests/new_session/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/conftest.py b/testing/web-platform/tests/webdriver/tests/new_session/conftest.py
new file mode 100644
index 0000000000..d67fdba449
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/conftest.py
@@ -0,0 +1,79 @@
+import pytest
+
+from webdriver.transport import HTTPWireProtocol
+
+
+def product(a, b):
+ return [(a, item) for item in b]
+
+
+def flatten(l):
+ return [item for x in l for item in x]
+
+
+@pytest.fixture(name="add_browser_capabilities")
+def fixture_add_browser_capabilities(configuration):
+
+ def add_browser_capabilities(capabilities):
+ # Make sure there aren't keys in common.
+ assert not set(configuration["capabilities"]).intersection(set(capabilities))
+ result = dict(configuration["capabilities"])
+ result.update(capabilities)
+
+ return result
+
+ return add_browser_capabilities
+
+
+@pytest.fixture(name="configuration")
+def fixture_configuration(configuration):
+ """Remove "acceptInsecureCerts" from capabilities if it exists.
+
+ Some browser configurations add acceptInsecureCerts capability by default.
+ Remove it during new_session tests to avoid interference.
+ """
+
+ if "acceptInsecureCerts" in configuration["capabilities"]:
+ configuration = dict(configuration)
+ del configuration["capabilities"]["acceptInsecureCerts"]
+ return configuration
+
+@pytest.fixture(name="new_session")
+def fixture_new_session(request, configuration, current_session):
+ """Start a new session for tests which themselves test creating new sessions.
+
+ :param body: The content of the body for the new session POST request.
+
+ :param delete_existing_session: Allows the fixture to delete an already
+ created custom session before the new session is getting created. This
+ is useful for tests which call this fixture multiple times within the
+ same test.
+ """
+ custom_session = {}
+
+ transport = HTTPWireProtocol(
+ configuration["host"], configuration["port"], url_prefix="/",
+ )
+
+ def _delete_session(session_id):
+ transport.send("DELETE", "session/{}".format(session_id))
+
+ def new_session(body, delete_existing_session=False):
+ # If there is an active session from the global session fixture,
+ # delete that one first
+ if current_session is not None:
+ current_session.end()
+
+ if delete_existing_session:
+ _delete_session(custom_session["session"]["sessionId"])
+
+ response = transport.send("POST", "session", body)
+ if response.status == 200:
+ custom_session["session"] = response.body["value"]
+ return response, custom_session.get("session", None)
+
+ yield new_session
+
+ if custom_session.get("session") is not None:
+ _delete_session(custom_session["session"]["sessionId"])
+ custom_session = None
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/create_alwaysMatch.py b/testing/web-platform/tests/webdriver/tests/new_session/create_alwaysMatch.py
new file mode 100644
index 0000000000..a4cc9efc02
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/create_alwaysMatch.py
@@ -0,0 +1,15 @@
+# META: timeout=long
+
+import pytest
+
+from .conftest import product, flatten
+
+from tests.support.asserts import assert_success
+from tests.new_session.support.create import valid_data
+
+
+@pytest.mark.parametrize("key,value", flatten(product(*item) for item in valid_data))
+def test_valid(new_session, add_browser_capabilities, key, value):
+ response, _ = new_session({"capabilities": {
+ "alwaysMatch": add_browser_capabilities({key: value})}})
+ assert_success(response)
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/create_firstMatch.py b/testing/web-platform/tests/webdriver/tests/new_session/create_firstMatch.py
new file mode 100644
index 0000000000..ec671530f7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/create_firstMatch.py
@@ -0,0 +1,16 @@
+# META: timeout=long
+
+import pytest
+
+from .conftest import product, flatten
+
+
+from tests.support.asserts import assert_success
+from tests.new_session.support.create import valid_data
+
+
+@pytest.mark.parametrize("key,value", flatten(product(*item) for item in valid_data))
+def test_valid(new_session, add_browser_capabilities, key, value):
+ response, _ = new_session({"capabilities": {
+ "firstMatch": [add_browser_capabilities({key: value})]}})
+ assert_success(response)
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/default_values.py b/testing/web-platform/tests/webdriver/tests/new_session/default_values.py
new file mode 100644
index 0000000000..abd4a5a64c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/default_values.py
@@ -0,0 +1,46 @@
+# META: timeout=long
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def test_basic(new_session, add_browser_capabilities):
+ response, _ = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
+ value = assert_success(response)
+ assert set(value.keys()) == {"sessionId", "capabilities"}
+
+
+def test_repeat_new_session(new_session, add_browser_capabilities):
+ response, _ = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
+ assert_success(response)
+
+ response, _ = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
+ assert_error(response, "session not created")
+
+
+def test_no_capabilites(new_session):
+ response, _ = new_session({})
+ assert_error(response, "invalid argument")
+
+
+def test_missing_first_match(new_session, add_browser_capabilities):
+ response, _ = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
+ assert_success(response)
+
+
+def test_missing_always_match(new_session, add_browser_capabilities):
+ response, _ = new_session({"capabilities": {"firstMatch": [add_browser_capabilities({})]}})
+ assert_success(response)
+
+
+def test_desired(new_session, add_browser_capabilities):
+ response, _ = new_session({"desiredCapbilities": add_browser_capabilities({})})
+ assert_error(response, "invalid argument")
+
+
+def test_ignore_non_spec_fields_in_capabilities(new_session, add_browser_capabilities):
+ response, _ = new_session({"capabilities": {
+ "alwaysMatch": add_browser_capabilities({}),
+ "desiredCapbilities": {"pageLoadStrategy": "eager"},
+ }})
+ value = assert_success(response)
+ assert value["capabilities"]["pageLoadStrategy"] == "normal"
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/invalid_capabilities.py b/testing/web-platform/tests/webdriver/tests/new_session/invalid_capabilities.py
new file mode 100644
index 0000000000..afdcea0daf
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/invalid_capabilities.py
@@ -0,0 +1,56 @@
+import pytest
+
+from .conftest import product, flatten
+
+from tests.new_session.support.create import invalid_data, invalid_extensions
+from tests.support.asserts import assert_error
+
+
+@pytest.mark.parametrize("value", [None, 1, "{}", []])
+def test_invalid_capabilites(new_session, value):
+ response, _ = new_session({"capabilities": value})
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("value", [None, 1, "{}", []])
+def test_invalid_always_match(new_session, add_browser_capabilities, value):
+ capabilities = {"alwaysMatch": value, "firstMatch": [add_browser_capabilities({})]}
+
+ response, _ = new_session({"capabilities": capabilities})
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("value", [None, 1, "[]", {}])
+def test_invalid_first_match(new_session, add_browser_capabilities, value):
+ capabilities = {"alwaysMatch": add_browser_capabilities({}), "firstMatch": value}
+
+ response, _ = new_session({"capabilities": capabilities})
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
+ lambda key, value: {"firstMatch": [{key: value}]}])
+@pytest.mark.parametrize("key,value", flatten(product(*item) for item in invalid_data))
+def test_invalid_values(new_session, add_browser_capabilities, body, key, value):
+ capabilities = body(key, value)
+ if "alwaysMatch" in capabilities:
+ capabilities["alwaysMatch"] = add_browser_capabilities(capabilities["alwaysMatch"])
+ else:
+ capabilities["firstMatch"][0] = add_browser_capabilities(capabilities["firstMatch"][0])
+
+ response, _ = new_session({"capabilities": capabilities})
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
+ lambda key, value: {"firstMatch": [{key: value}]}])
+@pytest.mark.parametrize("key", invalid_extensions)
+def test_invalid_extensions(new_session, add_browser_capabilities, body, key):
+ capabilities = body(key, {})
+ if "alwaysMatch" in capabilities:
+ capabilities["alwaysMatch"] = add_browser_capabilities(capabilities["alwaysMatch"])
+ else:
+ capabilities["firstMatch"][0] = add_browser_capabilities(capabilities["firstMatch"][0])
+
+ response, _ = new_session({"capabilities": capabilities})
+ assert_error(response, "invalid argument")
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/merge.py b/testing/web-platform/tests/webdriver/tests/new_session/merge.py
new file mode 100644
index 0000000000..857d289fca
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/merge.py
@@ -0,0 +1,82 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support import platform_name
+
+
+@pytest.mark.skipif(platform_name is None, reason="Unsupported platform {}".format(platform_name))
+@pytest.mark.parametrize("body", [lambda key, value: {"alwaysMatch": {key: value}},
+ lambda key, value: {"firstMatch": [{key: value}]}])
+def test_platform_name(new_session, add_browser_capabilities, body):
+ capabilities = body("platformName", platform_name)
+ if "alwaysMatch" in capabilities:
+ capabilities["alwaysMatch"] = add_browser_capabilities(capabilities["alwaysMatch"])
+ else:
+ capabilities["firstMatch"][0] = add_browser_capabilities(capabilities["firstMatch"][0])
+
+ response, _ = new_session({"capabilities": capabilities})
+ value = assert_success(response)
+
+ assert value["capabilities"]["platformName"] == platform_name
+
+
+invalid_merge = [
+ ("acceptInsecureCerts", (True, True)),
+ ("unhandledPromptBehavior", ("accept", "accept")),
+ ("unhandledPromptBehavior", ("accept", "dismiss")),
+ ("timeouts", ({"script": 10}, {"script": 10})),
+ ("timeouts", ({"script": 10}, {"pageLoad": 10})),
+]
+
+
+@pytest.mark.parametrize("key,value", invalid_merge)
+def test_merge_invalid(new_session, add_browser_capabilities, key, value):
+ response, _ = new_session({"capabilities": {
+ "alwaysMatch": add_browser_capabilities({key: value[0]}),
+ "firstMatch": [{}, {key: value[1]}],
+ }})
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.skipif(platform_name is None, reason="Unsupported platform {}".format(platform_name))
+def test_merge_platformName(new_session, add_browser_capabilities):
+ response, _ = new_session({"capabilities": {
+ "alwaysMatch": add_browser_capabilities({"timeouts": {"script": 10}}),
+ "firstMatch": [{
+ "platformName": platform_name.upper(),
+ "pageLoadStrategy": "none",
+ }, {
+ "platformName": platform_name,
+ "pageLoadStrategy": "eager",
+ }]}})
+
+ value = assert_success(response)
+
+ assert value["capabilities"]["platformName"] == platform_name
+ assert value["capabilities"]["pageLoadStrategy"] == "eager"
+
+
+def test_merge_browserName(new_session, add_browser_capabilities):
+ response, session = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
+ value = assert_success(response)
+
+ browser_settings = {
+ "browserName": value["capabilities"]["browserName"],
+ "browserVersion": value["capabilities"]["browserVersion"],
+ }
+
+ response, _ = new_session({"capabilities": {
+ "alwaysMatch": add_browser_capabilities({"timeouts": {"script": 10}}),
+ "firstMatch": [{
+ "browserName": browser_settings["browserName"] + "invalid",
+ "pageLoadStrategy": "none",
+ }, {
+ "browserName": browser_settings["browserName"],
+ "pageLoadStrategy": "eager",
+ }]}}, delete_existing_session=True)
+ value = assert_success(response)
+
+ assert value["capabilities"]["browserName"] == browser_settings['browserName']
+ assert value["capabilities"]["pageLoadStrategy"] == "eager"
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/page_load_strategy.py b/testing/web-platform/tests/webdriver/tests/new_session/page_load_strategy.py
new file mode 100644
index 0000000000..69288ef433
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/page_load_strategy.py
@@ -0,0 +1,7 @@
+from tests.support.asserts import assert_success
+
+def test_pageLoadStrategy(new_session, add_browser_capabilities):
+ response, _ = new_session({"capabilities": {
+ "alwaysMatch": add_browser_capabilities({"pageLoadStrategy": "eager"})}})
+ value = assert_success(response)
+ assert value["capabilities"]["pageLoadStrategy"] == "eager"
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/platform_name.py b/testing/web-platform/tests/webdriver/tests/new_session/platform_name.py
new file mode 100644
index 0000000000..54fe4743be
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/platform_name.py
@@ -0,0 +1,11 @@
+import pytest
+
+from tests.support import platform_name
+from tests.support.asserts import assert_success
+
+
+@pytest.mark.skipif(platform_name is None, reason="Unsupported platform {}".format(platform_name))
+def test_corresponds_to_local_system(new_session, add_browser_capabilities):
+ response, _ = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
+ value = assert_success(response)
+ assert value["capabilities"]["platformName"] == platform_name
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/response.py b/testing/web-platform/tests/webdriver/tests/new_session/response.py
new file mode 100644
index 0000000000..43a8d57931
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/response.py
@@ -0,0 +1,44 @@
+import uuid
+import pytest
+
+from tests.support.asserts import assert_success
+
+
+def test_sessionid(new_session, add_browser_capabilities):
+ response, _ = new_session({"capabilities": {"alwaysMatch": add_browser_capabilities({})}})
+ value = assert_success(response)
+ assert isinstance(value["sessionId"], str)
+ uuid.UUID(hex=value["sessionId"])
+
+
+@pytest.mark.parametrize("capability, type", [
+ ("browserName", str),
+ ("browserVersion", str),
+ ("platformName", str),
+ ("acceptInsecureCerts", bool),
+ ("pageLoadStrategy", str),
+ ("proxy", dict),
+ ("setWindowRect", bool),
+ ("timeouts", dict),
+ ("strictFileInteractability", bool),
+ ("unhandledPromptBehavior", str),
+])
+def test_capability_type(session, capability, type):
+ assert isinstance(session.capabilities, dict)
+ assert capability in session.capabilities
+ assert isinstance(session.capabilities[capability], type)
+
+
+@pytest.mark.parametrize("capability, default_value", [
+ ("acceptInsecureCerts", False),
+ ("pageLoadStrategy", "normal"),
+ ("proxy", {}),
+ ("setWindowRect", True),
+ ("timeouts", {"implicit": 0, "pageLoad": 300000, "script": 30000}),
+ ("strictFileInteractability", False),
+ ("unhandledPromptBehavior", "dismiss and notify"),
+])
+def test_capability_default_value(session, capability, default_value):
+ assert isinstance(session.capabilities, dict)
+ assert capability in session.capabilities
+ assert session.capabilities[capability] == default_value
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/support/__init__.py b/testing/web-platform/tests/webdriver/tests/new_session/support/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/support/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/support/create.py b/testing/web-platform/tests/webdriver/tests/new_session/support/create.py
new file mode 100644
index 0000000000..a0d0ce37b5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/support/create.py
@@ -0,0 +1,136 @@
+# Note that we can only test things here that all implementations must support
+valid_data = [
+ ("acceptInsecureCerts", [
+ False, None,
+ ]),
+ ("browserName", [
+ None,
+ ]),
+ ("browserVersion", [
+ None,
+ ]),
+ ("platformName", [
+ None,
+ ]),
+ ("pageLoadStrategy", [
+ None,
+ "none",
+ "eager",
+ "normal",
+ ]),
+ ("proxy", [
+ None,
+ ]),
+ ("timeouts", [
+ None, {},
+ {"script": 0, "pageLoad": 2.0, "implicit": 2**53 - 1},
+ {"script": 50, "pageLoad": 25},
+ {"script": 500},
+ ]),
+ ("strictFileInteractability", [
+ True, False, None,
+ ]),
+ ("unhandledPromptBehavior", [
+ "dismiss",
+ "accept",
+ None,
+ ]),
+ ("test:extension", [
+ None, False, "abc", 123, [],
+ {"key": "value"},
+ ]),
+]
+
+invalid_data = [
+ ("acceptInsecureCerts", [
+ 1, [], {}, "false",
+ ]),
+ ("browserName", [
+ 1, [], {}, False,
+ ]),
+ ("browserVersion", [
+ 1, [], {}, False,
+ ]),
+ ("platformName", [
+ 1, [], {}, False,
+ ]),
+ ("pageLoadStrategy", [
+ 1, [], {}, False,
+ "invalid",
+ "NONE",
+ "Eager",
+ "eagerblah",
+ "interactive",
+ " eager",
+ "eager "]),
+ ("proxy", [
+ 1, [], "{}",
+ {"proxyType": "SYSTEM"},
+ {"proxyType": "systemSomething"},
+ {"proxy type": "pac"},
+ {"proxy-Type": "system"},
+ {"proxy_type": "system"},
+ {"proxytype": "system"},
+ {"PROXYTYPE": "system"},
+ {"proxyType": None},
+ {"proxyType": 1},
+ {"proxyType": []},
+ {"proxyType": {"value": "system"}},
+ {" proxyType": "system"},
+ {"proxyType ": "system"},
+ {"proxyType ": " system"},
+ {"proxyType": "system "},
+ ]),
+ ("timeouts", [
+ 1, [], "{}", False,
+ {"invalid": 10},
+ {"PAGELOAD": 10},
+ {"page load": 10},
+ {" pageLoad": 10},
+ {"pageLoad ": 10},
+ {"pageLoad": None},
+ {"pageLoad": False},
+ {"pageLoad": []},
+ {"pageLoad": "10"},
+ {"pageLoad": 2.5},
+ {"pageLoad": -1},
+ {"pageLoad": 2**53},
+ {"pageLoad": {"value": 10}},
+ {"pageLoad": 10, "invalid": 10},
+ ]),
+ ("strictFileInteractability", [
+ 1, [], {}, "false",
+ ]),
+ ("unhandledPromptBehavior", [
+ 1, [], {}, False,
+ "DISMISS",
+ "dismissABC",
+ "Accept",
+ " dismiss",
+ "dismiss ",
+ ])
+]
+
+invalid_extensions = [
+ "automaticInspection",
+ "automaticProfiling",
+ "browser",
+ "chromeOptions",
+ "ensureCleanSession",
+ "firefox",
+ "firefox_binary",
+ "firefoxOptions",
+ "initialBrowserUrl",
+ "javascriptEnabled",
+ "logFile",
+ "logLevel",
+ "nativeEvents",
+ "platform",
+ "platformVersion",
+ "profile",
+ "requireWindowFocus",
+ "safari.options",
+ "seleniumProtocol",
+ "trustAllSSLCertificates",
+ "version",
+]
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/timeouts.py b/testing/web-platform/tests/webdriver/tests/new_session/timeouts.py
new file mode 100644
index 0000000000..4f2652bba8
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/timeouts.py
@@ -0,0 +1,32 @@
import pytest

from tests.support.asserts import assert_error, assert_success


def test_default_values(session):
    """A fresh session reports the spec-mandated default timeouts."""
    timeouts = session.capabilities["timeouts"]

    assert timeouts["implicit"] == 0
    assert timeouts["pageLoad"] == 300000
    assert timeouts["script"] == 30000


@pytest.mark.parametrize("timeouts", [
    {"implicit": 444, "pageLoad": 300000, "script": 30000},
    {"implicit": 0, "pageLoad": 444, "script": 30000},
    {"implicit": 0, "pageLoad": 300000, "script": 444},
    # "script" is the only timeout that may be null (disables the timeout).
    {"implicit": 0, "pageLoad": 300000, "script": None},
])
def test_timeouts(new_session, add_browser_capabilities, timeouts):
    """Valid timeout values are accepted and echoed back in the
    capabilities of the New Session response."""
    response, _ = new_session({"capabilities": {
        "alwaysMatch": add_browser_capabilities({"timeouts": timeouts})}})
    value = assert_success(response)
    assert value["capabilities"]["timeouts"] == timeouts


@pytest.mark.parametrize("timeouts", [
    # null is invalid for "implicit" and "pageLoad" (only "script" may
    # be null, as exercised in test_timeouts above).
    {"implicit": None, "pageLoad": 300000, "script": 30000},
    {"implicit": 0, "pageLoad": None, "script": 30000},
    {"implicit": None, "pageLoad": None, "script": None},
])
def test_invalid_timeouts(new_session, add_browser_capabilities, timeouts):
    """Invalid timeout values cause New Session to fail with
    "invalid argument"."""
    response, _ = new_session({"capabilities": {
        "alwaysMatch": add_browser_capabilities({"timeouts": timeouts})}})
    assert_error(response, "invalid argument")
diff --git a/testing/web-platform/tests/webdriver/tests/new_session/websocket_url.py b/testing/web-platform/tests/webdriver/tests/new_session/websocket_url.py
new file mode 100644
index 0000000000..452decc90a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_session/websocket_url.py
@@ -0,0 +1,7 @@
from tests.support.asserts import assert_success


def test_websocket_url(session, new_session, add_browser_capabilities):
    """Requesting "webSocketUrl": true in New Session returns a
    WebSocket endpoint (ws://) in the matched capabilities."""
    response, _ = new_session({"capabilities": {
        "alwaysMatch": add_browser_capabilities({"webSocketUrl": True})}})
    value = assert_success(response)
    assert value["capabilities"]["webSocketUrl"].startswith("ws://")
diff --git a/testing/web-platform/tests/webdriver/tests/new_window/__init__.py b/testing/web-platform/tests/webdriver/tests/new_window/__init__.py
new file mode 100644
index 0000000000..e16014597c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_window/__init__.py
@@ -0,0 +1,10 @@
def opener(session):
    """Return window.opener of the current browsing context."""
    return session.execute_script("""
        return window.opener;
        """)
+
+
def window_name(session):
    """Return window.name of the current browsing context."""
    return session.execute_script("""
        return window.name;
        """)
diff --git a/testing/web-platform/tests/webdriver/tests/new_window/new.py b/testing/web-platform/tests/webdriver/tests/new_window/new.py
new file mode 100644
index 0000000000..fd0a1ffceb
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_window/new.py
@@ -0,0 +1,64 @@
import pytest

from webdriver.transport import Response

from tests.support.asserts import assert_error, assert_success


def new_window(session, type_hint=None):
    """Send a New Window command with the given "type" hint."""
    return session.transport.send(
        "POST", "session/{session_id}/window/new".format(**vars(session)),
        {"type": type_hint})


def _assert_window_opened(session, original_handles, value):
    # Shared success check: exactly one new handle exists, the response
    # reports that handle (and not a pre-existing one), and the created
    # context is either a tab or a window.
    handles = session.handles
    assert len(handles) == len(original_handles) + 1
    assert value["handle"] in handles
    assert value["handle"] not in original_handles
    assert value["type"] in ["tab", "window"]


def test_null_parameter_value(session, http):
    """A null request body is rejected with "invalid argument"."""
    path = "/session/{session_id}/window/new".format(**vars(session))
    with http.post(path, None) as response:
        assert_error(Response.from_http(response), "invalid argument")


def test_no_top_browsing_context(session, closed_window):
    """With the top-level context closed the command fails with
    "no such window"."""
    response = new_window(session)
    assert_error(response, "no such window")


def test_no_browsing_context(session, closed_frame):
    """A closed child frame does not prevent opening a new window."""
    original_handles = session.handles

    response = new_window(session)
    value = assert_success(response)
    _assert_window_opened(session, original_handles, value)


@pytest.mark.parametrize("type_hint", [True, 42, 4.2, [], {}])
def test_type_with_invalid_type(session, type_hint):
    """Non-string, non-null "type" values are rejected."""
    response = new_window(session, type_hint)
    assert_error(response, "invalid argument")


def test_type_with_null_value(session):
    """A null "type" hint is valid; the implementation picks tab/window."""
    original_handles = session.handles

    response = new_window(session, type_hint=None)
    value = assert_success(response)
    _assert_window_opened(session, original_handles, value)


def test_type_with_unknown_value(session):
    """An unknown string hint is valid; the implementation picks
    tab/window."""
    original_handles = session.handles

    response = new_window(session, type_hint="foo")
    value = assert_success(response)
    _assert_window_opened(session, original_handles, value)
diff --git a/testing/web-platform/tests/webdriver/tests/new_window/new_tab.py b/testing/web-platform/tests/webdriver/tests/new_window/new_tab.py
new file mode 100644
index 0000000000..f6cacf3c35
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_window/new_tab.py
@@ -0,0 +1,89 @@
+from tests.support.asserts import assert_success
+
+from . import opener, window_name
+
+
def new_window(session, type_hint=None):
    """Send a New Window command with the given "type" hint."""
    return session.transport.send(
        "POST", "session/{session_id}/window/new".format(**vars(session)),
        {"type": type_hint})


def test_payload(session):
    """A "tab" hint opens exactly one new handle of type "tab"."""
    original_handles = session.handles

    response = new_window(session, type_hint="tab")
    value = assert_success(response)
    handles = session.handles
    assert len(handles) == len(original_handles) + 1
    assert value["handle"] in handles
    assert value["handle"] not in original_handles
    assert value["type"] == "tab"


def test_keeps_current_window_handle(session):
    """Opening a new tab must not switch the current window handle."""
    original_handle = session.window_handle

    response = new_window(session, type_hint="tab")
    value = assert_success(response)
    assert value["type"] == "tab"

    assert session.window_handle == original_handle


def test_opens_about_blank_in_new_tab(session, inline):
    """The new tab loads about:blank; the original page is untouched."""
    url = inline("<p>foo")
    session.url = url

    response = new_window(session, type_hint="tab")
    value = assert_success(response)
    assert value["type"] == "tab"

    assert session.url == url

    session.window_handle = value["handle"]
    assert session.url == "about:blank"


def test_sets_no_window_name(session):
    """The new tab has an empty window.name."""
    response = new_window(session, type_hint="tab")
    value = assert_success(response)
    assert value["type"] == "tab"

    session.window_handle = value["handle"]
    assert window_name(session) == ""


def test_sets_no_opener(session):
    """The new tab has no window.opener."""
    response = new_window(session, type_hint="tab")
    value = assert_success(response)
    assert value["type"] == "tab"

    session.window_handle = value["handle"]
    assert opener(session) is None


def test_focus_content(session, inline):
    """Content in the new tab can receive focus after switching to it."""
    response = new_window(session, type_hint="tab")
    value = assert_success(response)
    assert value["type"] == "tab"

    session.window_handle = value["handle"]

    # The page records in the "_focused" attribute whether the selection
    # anchor landed inside the focused span on load.
    session.url = inline("""
        <span contenteditable="true"> abc </span>
        <script>
        const selection = getSelection();
        window.onload = async() => {
          const initial = document.querySelector("span");
          initial.focus();
          initial.setAttribute(
            "_focused",
            selection.anchorNode == initial.firstChild
          );
        }
        </script>
        """)

    elem = session.find.css("span", all=False)
    assert elem.attribute("_focused") == "true"
diff --git a/testing/web-platform/tests/webdriver/tests/new_window/new_window.py b/testing/web-platform/tests/webdriver/tests/new_window/new_window.py
new file mode 100644
index 0000000000..a3fce364cc
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_window/new_window.py
@@ -0,0 +1,89 @@
+from tests.support.asserts import assert_success
+
+from . import opener, window_name
+
+
def new_window(session, type_hint=None):
    """Send a New Window command with the given "type" hint."""
    return session.transport.send(
        "POST", "session/{session_id}/window/new".format(**vars(session)),
        {"type": type_hint})


def test_payload(session):
    """A "window" hint opens exactly one new handle of type "window"."""
    original_handles = session.handles

    response = new_window(session, type_hint="window")
    value = assert_success(response)
    handles = session.handles
    assert len(handles) == len(original_handles) + 1
    assert value["handle"] in handles
    assert value["handle"] not in original_handles
    assert value["type"] == "window"


def test_keeps_current_window_handle(session):
    """Opening a new window must not switch the current window handle."""
    original_handle = session.window_handle

    response = new_window(session, type_hint="window")
    value = assert_success(response)
    assert value["type"] == "window"

    assert session.window_handle == original_handle


def test_opens_about_blank_in_new_window(session, inline):
    """The new window loads about:blank; the original page is untouched."""
    url = inline("<p>foo")
    session.url = url

    response = new_window(session, type_hint="window")
    value = assert_success(response)
    assert value["type"] == "window"

    assert session.url == url

    session.window_handle = value["handle"]
    assert session.url == "about:blank"


def test_sets_no_window_name(session):
    """The new window has an empty window.name."""
    response = new_window(session, type_hint="window")
    value = assert_success(response)
    assert value["type"] == "window"

    session.window_handle = value["handle"]
    assert window_name(session) == ""


def test_sets_no_opener(session):
    """The new window has no window.opener."""
    response = new_window(session, type_hint="window")
    value = assert_success(response)
    assert value["type"] == "window"

    session.window_handle = value["handle"]
    assert opener(session) is None


def test_focus_content(session, inline):
    """Content in the new window can receive focus after switching to it."""
    response = new_window(session, type_hint="window")
    value = assert_success(response)
    assert value["type"] == "window"

    session.window_handle = value["handle"]

    # The page records in the "_focused" attribute whether the selection
    # anchor landed inside the focused span on load.
    session.url = inline("""
        <span contenteditable="true"> abc </span>
        <script>
        const selection = getSelection();
        window.onload = async() => {
          const initial = document.querySelector("span");
          initial.focus();
          initial.setAttribute(
            "_focused",
            selection.anchorNode == initial.firstChild
          );
        }
        </script>
        """)

    elem = session.find.css("span", all=False)
    assert elem.attribute("_focused") == "true"
diff --git a/testing/web-platform/tests/webdriver/tests/new_window/user_prompts.py b/testing/web-platform/tests/webdriver/tests/new_window/user_prompts.py
new file mode 100644
index 0000000000..0d841468ee
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/new_window/user_prompts.py
@@ -0,0 +1,121 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
def new_window(session, type_hint=None):
    """Send a New Window command with the given "type" hint."""
    return session.transport.send(
        "POST", "session/{session_id}/window/new".format(**vars(session)),
        {"type": type_hint})


@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
    """Checker for prompt handlers that auto-close the dialog and let the
    command succeed: New Window opens exactly one new handle and the
    dialog is handled with the expected return value."""
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        original_handles = session.handles

        create_dialog(dialog_type, text=dialog_type)

        response = new_window(session)
        value = assert_success(response)

        handles = session.handles
        assert len(handles) == len(original_handles) + 1
        assert value["handle"] in handles
        assert value["handle"] not in original_handles

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

    return check_user_prompt_closed_without_exception


@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
    """Checker for "... and notify" handlers: the dialog is auto-closed
    but the command fails with "unexpected alert open" and no window is
    opened."""
    def check_user_prompt_closed_with_exception(dialog_type, retval):
        original_handles = session.handles

        create_dialog(dialog_type, text=dialog_type)

        response = new_window(session)
        assert_error(response, "unexpected alert open")

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        assert len(session.handles) == len(original_handles)

    return check_user_prompt_closed_with_exception


@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
    """Checker for the "ignore" handler: the dialog stays open, the
    command fails, and no window is opened. Dismisses the dialog itself
    so later tests are unaffected."""
    def check_user_prompt_not_closed_but_exception(dialog_type):
        original_handles = session.handles

        create_dialog(dialog_type, text=dialog_type)

        response = new_window(session)
        assert_error(response, "unexpected alert open")

        assert session.alert.text == dialog_type
        session.alert.dismiss()

        assert len(session.handles) == len(original_handles)

    return check_user_prompt_not_closed_but_exception


@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
    """"accept": dialog auto-accepted, command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """"accept and notify": dialog auto-accepted, command still errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
    """"dismiss": dialog auto-dismissed, command succeeds."""
    check_user_prompt_closed_without_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    """"dismiss and notify": dialog auto-dismissed, command still errors."""
    check_user_prompt_closed_with_exception(dialog_type, retval)


@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    """"ignore": dialog left open, command errors."""
    check_user_prompt_not_closed_but_exception(dialog_type)


@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    """No capability set: behaves like "dismiss and notify"."""
    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/__init__.py b/testing/web-platform/tests/webdriver/tests/perform_actions/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/conftest.py b/testing/web-platform/tests/webdriver/tests/perform_actions/conftest.py
new file mode 100644
index 0000000000..477e461732
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/conftest.py
@@ -0,0 +1,99 @@
+import pytest
+
+from webdriver.error import NoSuchWindowException
+
+from tests.perform_actions.support.keys import Keys
+
+
@pytest.fixture
def session_new_window(capabilities, session):
    """Yield the session switched to a freshly opened window, restoring
    the original window handle on teardown."""
    # NOTE(review): "capabilities" is unused here — presumably declared to
    # force fixture ordering; confirm before removing.
    # Prevent unreleased dragged elements by running the test in a new window.
    original_handle = session.window_handle
    session.window_handle = session.new_window()

    yield session

    try:
        session.window.close()
    except NoSuchWindowException:
        pass

    session.window_handle = original_handle


@pytest.fixture
def key_chain(session):
    """Action chain for a keyboard input source."""
    return session.actions.sequence("key", "keyboard_id")


@pytest.fixture
def mouse_chain(session):
    """Action chain for a mouse pointer input source."""
    return session.actions.sequence(
        "pointer",
        "pointer_id",
        {"pointerType": "mouse"})


@pytest.fixture
def touch_chain(session):
    """Action chain for a touch pointer input source."""
    return session.actions.sequence(
        "pointer",
        "pointer_id",
        {"pointerType": "touch"})


@pytest.fixture
def pen_chain(session):
    """Action chain for a pen pointer input source."""
    return session.actions.sequence(
        "pointer",
        "pointer_id",
        {"pointerType": "pen"})


@pytest.fixture
def none_chain(session):
    """Action chain for a "none" (pause-only) input source."""
    return session.actions.sequence("none", "none_id")


@pytest.fixture
def wheel_chain(session):
    """Action chain for a wheel input source."""
    return session.actions.sequence("wheel", "wheel_id")


@pytest.fixture(autouse=True)
def release_actions(session, request):
    # release all actions after each test
    # equivalent to a teardown_function, but with access to session fixture
    request.addfinalizer(session.actions.release)


@pytest.fixture
def key_reporter(session, test_actions_page, request):
    """Represents focused input element from `test_actions_page` fixture."""
    # NOTE(review): "request" is unused — confirm before removing.
    input_el = session.find.css("#keys", all=False)
    input_el.click()
    session.execute_script("resetEvents();")
    return input_el


@pytest.fixture
def modifier_key(session):
    """Return the platform's primary shortcut modifier (Meta on mac,
    Control elsewhere)."""
    if session.capabilities["platformName"] == "mac":
        return Keys.META
    else:
        return Keys.CONTROL


@pytest.fixture
def test_actions_page(session, url):
    """Load the generic action-event recording test page."""
    session.url = url("/webdriver/tests/perform_actions/support/test_actions_wdspec.html")


@pytest.fixture
def test_actions_scroll_page(session, url):
    """Load the scroll-action recording test page."""
    session.url = url("/webdriver/tests/perform_actions/support/test_actions_scroll_wdspec.html")


@pytest.fixture
def test_actions_pointer_page(session, url):
    """Load the pointer-action recording test page."""
    session.url = url("/webdriver/tests/perform_actions/support/test_actions_pointer_wdspec.html")
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/key.py b/testing/web-platform/tests/webdriver/tests/perform_actions/key.py
new file mode 100644
index 0000000000..eef0ab96f4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/key.py
@@ -0,0 +1,38 @@
+import pytest
+
+from webdriver.error import NoSuchWindowException
+
+from tests.perform_actions.support.keys import Keys
+from tests.perform_actions.support.refine import get_keys
+
+
def test_null_response_value(session, key_chain):
    """A successful Perform Actions returns null."""
    value = key_chain.key_up("a").perform()
    assert value is None


def test_no_top_browsing_context(session, closed_window, key_chain):
    """Performing actions with the top-level context closed raises
    NoSuchWindowException."""
    with pytest.raises(NoSuchWindowException):
        key_chain.key_up("a").perform()


def test_no_browsing_context(session, closed_frame, key_chain):
    """Performing actions with the current frame closed raises
    NoSuchWindowException."""
    with pytest.raises(NoSuchWindowException):
        key_chain.key_up("a").perform()


def test_element_not_focused(session, test_actions_page, key_chain):
    """Key actions without a focused element enter no text into #keys."""
    key_reporter = session.find.css("#keys", all=False)

    key_chain.key_down("a").key_up("a").perform()

    assert get_keys(key_reporter) == ""


def test_backspace_erases_keys(session, key_reporter, key_chain):
    """Two backspaces after typing "efcd" leave "ef" in the input."""
    key_chain \
        .send_keys("efcd") \
        .send_keys([Keys.BACKSPACE, Keys.BACKSPACE]) \
        .perform()

    assert get_keys(key_reporter) == "ef"
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/key_events.py b/testing/web-platform/tests/webdriver/tests/perform_actions/key_events.py
new file mode 100644
index 0000000000..8dbe024d18
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/key_events.py
@@ -0,0 +1,243 @@
+# META: timeout=long
+import copy
+from collections import defaultdict
+
+import pytest
+
+from tests.perform_actions.support.keys import ALL_EVENTS, Keys, ALTERNATIVE_KEY_NAMES
+from tests.perform_actions.support.refine import get_events, get_keys
+from tests.support.helpers import filter_dict
+
+
def test_keyup_only_sends_no_events(session, key_reporter, key_chain):
    """A keyUp with no prior keyDown emits nothing, and releasing the
    action state afterwards emits nothing either."""
    key_chain.key_up("a").perform()

    assert len(get_keys(key_reporter)) == 0
    assert len(get_events(session)) == 0

    session.actions.release()
    assert len(get_keys(key_reporter)) == 0
    assert len(get_events(session)) == 0


@pytest.mark.parametrize("key, event", [
    (Keys.ALT, "ALT"),
    (Keys.CONTROL, "CONTROL"),
    (Keys.META, "META"),
    (Keys.SHIFT, "SHIFT"),
    (Keys.R_ALT, "R_ALT"),
    (Keys.R_CONTROL, "R_CONTROL"),
    (Keys.R_META, "R_META"),
    (Keys.R_SHIFT, "R_SHIFT"),
])
def test_modifier_key_sends_correct_events(session, key_reporter, key_chain, key, event):
    """Modifier keys emit only keydown/keyup (no keypress) and add no
    text to the input field."""
    code = ALL_EVENTS[event]["code"]
    value = ALL_EVENTS[event]["key"]

    if session.capabilities["browserName"] == "internet explorer":
        key_reporter.click()
        session.execute_script("resetEvents();")
    key_chain \
        .key_down(key) \
        .key_up(key) \
        .perform()
    all_events = get_events(session)

    expected = [
        {"code": code, "key": value, "type": "keydown"},
        {"code": code, "key": value, "type": "keyup"},
    ]

    events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
        # Remove 'code' entry if browser doesn't support it
        expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
        events = [filter_dict(e, expected[0]) for e in events]
    assert events == expected

    assert len(get_keys(key_reporter)) == 0


@pytest.mark.parametrize("key,event", [
    (Keys.ESCAPE, "ESCAPE"),
    (Keys.RIGHT, "RIGHT"),
])
def test_non_printable_key_sends_events(session, key_reporter, key_chain, key, event):
    """Non-printable keys emit keydown/(optional keypress)/keyup, with
    alternative key names accepted, and add no text to the input."""
    code = ALL_EVENTS[event]["code"]
    value = ALL_EVENTS[event]["key"]

    key_chain \
        .key_down(key) \
        .key_up(key) \
        .perform()
    all_events = get_events(session)

    expected = [
        {"code": code, "key": value, "type": "keydown"},
        {"code": code, "key": value, "type": "keypress"},
        {"code": code, "key": value, "type": "keyup"},
    ]

    # Make a copy for alternate key property values
    # Note: only keydown and keyup are affected by alternate key names
    alt_expected = copy.deepcopy(expected)
    if event in ALTERNATIVE_KEY_NAMES:
        alt_expected[0]["key"] = ALTERNATIVE_KEY_NAMES[event]
        alt_expected[2]["key"] = ALTERNATIVE_KEY_NAMES[event]

    events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
        # Remove 'code' entry if browser doesn't support it
        expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
        alt_expected = [filter_dict(e, {"key": "", "type": ""}) for e in alt_expected]
        events = [filter_dict(e, expected[0]) for e in events]
    if len(events) == 2:
        # most browsers don't send a keypress for non-printable keys
        assert events == [expected[0], expected[2]] or events == [alt_expected[0], alt_expected[2]]
    else:
        assert events == expected or events == alt_expected

    assert len(get_keys(key_reporter)) == 0


@pytest.mark.parametrize("value,code", [
    (u"a", "KeyA",),
    ("a", "KeyA",),
    (u"\"", "Quote"),
    (u",", "Comma"),
    (u"\u00E0", ""),
    (u"\u0416", ""),
    (u"@", "Digit2"),
    (u"\u2603", ""),
    (u"\uF6C2", ""),  # PUA
])
def test_printable_key_sends_correct_events(session, key_reporter, key_chain, value, code):
    """Printable keys emit keydown/keypress/keyup and the character is
    recorded in the input field."""
    key_chain \
        .key_down(value) \
        .key_up(value) \
        .perform()
    all_events = get_events(session)

    expected = [
        {"code": code, "key": value, "type": "keydown"},
        {"code": code, "key": value, "type": "keypress"},
        {"code": code, "key": value, "type": "keyup"},
    ]

    events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
        # Remove 'code' entry if browser doesn't support it
        expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
        events = [filter_dict(e, expected[0]) for e in events]
    assert events == expected

    assert get_keys(key_reporter) == value


def test_sequence_of_keydown_printable_keys_sends_events(session, key_reporter, key_chain):
    """Two keyDowns without keyUps emit keydown/keypress for each key and
    both characters are recorded."""
    key_chain \
        .key_down("a") \
        .key_down("b") \
        .perform()
    all_events = get_events(session)

    expected = [
        {"code": "KeyA", "key": "a", "type": "keydown"},
        {"code": "KeyA", "key": "a", "type": "keypress"},
        {"code": "KeyB", "key": "b", "type": "keydown"},
        {"code": "KeyB", "key": "b", "type": "keypress"},
    ]

    events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
        # Remove 'code' entry if browser doesn't support it
        expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
        events = [filter_dict(e, expected[0]) for e in events]
    assert events == expected

    assert get_keys(key_reporter) == "ab"


def test_sequence_of_keydown_printable_characters_sends_events(session, key_reporter, key_chain):
    """send_keys emits the full keydown/keypress/keyup triple per
    character, in order."""
    key_chain.send_keys("ef").perform()
    all_events = get_events(session)

    expected = [
        {"code": "KeyE", "key": "e", "type": "keydown"},
        {"code": "KeyE", "key": "e", "type": "keypress"},
        {"code": "KeyE", "key": "e", "type": "keyup"},
        {"code": "KeyF", "key": "f", "type": "keydown"},
        {"code": "KeyF", "key": "f", "type": "keypress"},
        {"code": "KeyF", "key": "f", "type": "keyup"},
    ]

    events = [filter_dict(e, expected[0]) for e in all_events]
    if len(events) > 0 and events[0]["code"] is None:
        # Remove 'code' entry if browser doesn't support it
        expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
        events = [filter_dict(e, expected[0]) for e in events]
    assert events == expected

    assert get_keys(key_reporter) == "ef"


@pytest.mark.parametrize("name,expected", ALL_EVENTS.items())
def test_special_key_sends_keydown(session, key_reporter, key_chain, name, expected):
    """Every special key in ALL_EVENTS emits a non-repeating keydown with
    the expected properties; only printable keys enter text."""
    if name.startswith("F"):
        # Prevent default behavior for F1, etc., but only after keydown
        # bubbles up to body. (Otherwise activated browser menus/functions
        # may interfere with subsequent tests.)
        session.execute_script("""
            document.body.addEventListener("keydown",
                    function(e) { e.preventDefault() });
        """)
    if session.capabilities["browserName"] == "internet explorer":
        key_reporter.click()
        session.execute_script("resetEvents();")
    key_chain.key_down(getattr(Keys, name)).perform()

    # only interested in keydown
    first_event = get_events(session)[0]
    # make a copy so we can throw out irrelevant keys and compare to events
    expected = dict(expected)

    del expected["value"]

    # make another copy for alternative key names
    alt_expected = copy.deepcopy(expected)
    if name in ALTERNATIVE_KEY_NAMES:
        alt_expected["key"] = ALTERNATIVE_KEY_NAMES[name]

    # check and remove keys that aren't in expected
    assert first_event["type"] == "keydown"
    assert first_event["repeat"] is False
    first_event = filter_dict(first_event, expected)
    if first_event["code"] is None:
        del first_event["code"]
        del expected["code"]
        del alt_expected["code"]
    assert first_event == expected or first_event == alt_expected
    # only printable characters should be recorded in input field
    entered_keys = get_keys(key_reporter)
    if len(expected["key"]) == 1:
        assert entered_keys == expected["key"]
    else:
        assert len(entered_keys) == 0


def test_space_char_equals_pua(session, key_reporter, key_chain):
    """The Keys.SPACE constant and a literal space character produce
    identical event streams (two identical events per event type)."""
    key_chain \
        .key_down(Keys.SPACE) \
        .key_up(Keys.SPACE) \
        .key_down(" ") \
        .key_up(" ") \
        .perform()
    all_events = get_events(session)
    by_type = defaultdict(list)
    for event in all_events:
        by_type[event["type"]].append(event)

    for event_type in by_type:
        events = by_type[event_type]
        assert len(events) == 2
        assert events[0] == events[1]
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/key_modifiers.py b/testing/web-platform/tests/webdriver/tests/perform_actions/key_modifiers.py
new file mode 100644
index 0000000000..e8ea30dd3d
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/key_modifiers.py
@@ -0,0 +1,37 @@
+import pytest
+
+from tests.perform_actions.support.keys import Keys
+
+
@pytest.mark.parametrize("modifier", [Keys.SHIFT, Keys.R_SHIFT])
def test_shift_modifier_and_non_printable_keys(session, key_reporter, key_chain, modifier):
    """Shift held across a backspace still erases one character."""
    key_chain \
        .send_keys("foo") \
        .key_down(modifier) \
        .key_down(Keys.BACKSPACE) \
        .key_up(modifier) \
        .key_up(Keys.BACKSPACE) \
        .perform()

    assert key_reporter.property("value") == "fo"


@pytest.mark.parametrize("modifier", [Keys.SHIFT, Keys.R_SHIFT])
def test_shift_modifier_generates_capital_letters(session, key_reporter, key_chain, modifier):
    """Letters typed while either shift key is down are capitalized;
    shift state is sampled at keydown time."""
    key_chain \
        .send_keys("b") \
        .key_down(modifier) \
        .key_down("c") \
        .key_up(modifier) \
        .key_up("c") \
        .key_down("d") \
        .key_up("d") \
        .key_down(modifier) \
        .key_down("e") \
        .key_up("e") \
        .key_down("f") \
        .key_up(modifier) \
        .key_up("f") \
        .perform()

    assert key_reporter.property("value") == "bCdEF"
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/key_shortcuts.py b/testing/web-platform/tests/webdriver/tests/perform_actions/key_shortcuts.py
new file mode 100644
index 0000000000..b5ad7a2173
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/key_shortcuts.py
@@ -0,0 +1,49 @@
+from tests.perform_actions.support.keys import Keys
+from tests.perform_actions.support.refine import get_keys
+
+
def test_mod_a_and_backspace_deletes_all_text(session, key_reporter, key_chain, modifier_key):
    """Select-all (mod+a) followed by backspace clears the input."""
    key_chain.send_keys("abc d") \
        .key_down(modifier_key) \
        .key_down("a") \
        .key_up(modifier_key) \
        .key_up("a") \
        .key_down(Keys.BACKSPACE) \
        .perform()
    assert get_keys(key_reporter) == ""


def test_mod_a_mod_c_right_mod_v_pastes_text(session, key_reporter, key_chain, modifier_key):
    """Select-all, copy, caret right, paste duplicates the text."""
    initial = "abc d"
    key_chain.send_keys(initial) \
        .key_down(modifier_key) \
        .key_down("a") \
        .key_up(modifier_key) \
        .key_up("a") \
        .key_down(modifier_key) \
        .key_down("c") \
        .key_up(modifier_key) \
        .key_up("c") \
        .send_keys([Keys.RIGHT]) \
        .key_down(modifier_key) \
        .key_down("v") \
        .key_up(modifier_key) \
        .key_up("v") \
        .perform()
    assert get_keys(key_reporter) == initial * 2


def test_mod_a_mod_x_deletes_all_text(session, key_reporter, key_chain, modifier_key):
    """Select-all then cut (mod+x) clears the input."""
    key_chain.send_keys("abc d") \
        .key_down(modifier_key) \
        .key_down("a") \
        .key_up(modifier_key) \
        .key_up("a") \
        .key_down(modifier_key) \
        .key_down("x") \
        .key_up(modifier_key) \
        .key_up("x") \
        .perform()
    assert get_keys(key_reporter) == ""
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/key_special_keys.py b/testing/web-platform/tests/webdriver/tests/perform_actions/key_special_keys.py
new file mode 100644
index 0000000000..003bba4294
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/key_special_keys.py
@@ -0,0 +1,38 @@
+import pytest
+
+from webdriver import error
+
+from tests.perform_actions.support.refine import get_keys
+
+
@pytest.mark.parametrize("value", [
    (u"\U0001F604"),
    (u"\U0001F60D"),
    (u"\u0BA8\u0BBF"),
    (u"\u1100\u1161\u11A8"),
])
def test_codepoint_keys_behave_correctly(session, key_reporter, key_chain, value):
    """Multi-code-unit grapheme clusters sent as a single key arrive as
    the full cluster in the input field."""
    # Not using key_chain.send_keys() because we always want to treat value as
    # one character here. `len(value)` varies by platform for non-BMP characters,
    # so we don't want to iterate over value.
    key_chain \
        .key_down(value) \
        .key_up(value) \
        .perform()

    # events sent by major browsers are inconsistent so only check key value
    assert get_keys(key_reporter) == value


@pytest.mark.parametrize("value", [
    (u"fa"),
    (u"\u0BA8\u0BBFb"),
    (u"\u0BA8\u0BBF\u0BA8"),
    (u"\u1100\u1161\u11A8c")
])
def test_invalid_multiple_codepoint_keys_fail(session, key_reporter, key_chain, value):
    """A key value containing more than one grapheme cluster is rejected
    with InvalidArgumentException."""
    with pytest.raises(error.InvalidArgumentException):
        key_chain \
            .key_down(value) \
            .key_up(value) \
            .perform()
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/none.py b/testing/web-platform/tests/webdriver/tests/perform_actions/none.py
new file mode 100644
index 0000000000..4fadec40a4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/none.py
@@ -0,0 +1,24 @@
+from tests.support.asserts import assert_error, assert_success
+
+
def perform_actions(session, actions):
    """Send a Perform Actions command with the given action sequences."""
    # NOTE(review): sibling modules build this path without the leading
    # slash ("session/..."); both forms appear in this test suite —
    # confirm the transport treats them identically.
    return session.transport.send(
        "POST",
        "/session/{session_id}/actions".format(**vars(session)),
        {"actions": actions},
    )


def test_null_response_value(session, none_chain):
    """A successful Perform Actions returns null."""
    response = perform_actions(session, [none_chain.pause(0).dict])
    assert_success(response, None)


def test_no_top_browsing_context(session, closed_window, none_chain):
    """With the top-level context closed the command fails with
    "no such window"."""
    response = perform_actions(session, [none_chain.pause(0).dict])
    assert_error(response, "no such window")


def test_no_browsing_context(session, closed_frame, none_chain):
    """With the current frame closed the command fails with
    "no such window"."""
    response = perform_actions(session, [none_chain.pause(0).dict])
    assert_error(response, "no such window")
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_contextmenu.py b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_contextmenu.py
new file mode 100644
index 0000000000..50f684327f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_contextmenu.py
@@ -0,0 +1,78 @@
+import pytest
+
+from tests.perform_actions.support.keys import Keys
+from tests.perform_actions.support.refine import get_events
+from tests.support.helpers import filter_dict
+
+
+@pytest.mark.parametrize("modifier, prop", [
+ (Keys.CONTROL, "ctrlKey"),
+ (Keys.R_CONTROL, "ctrlKey"),
+])
+def test_control_click(session, test_actions_page, key_chain, mouse_chain, modifier, prop):
+ os = session.capabilities["platformName"]
+ key_chain \
+ .pause(0) \
+ .key_down(modifier) \
+ .pause(200) \
+ .key_up(modifier)
+ outer = session.find.css("#outer", all=False)
+ mouse_chain.click(element=outer)
+ session.actions.perform([key_chain.dict, mouse_chain.dict])
+ if os != "mac":
+ expected = [
+ {"type": "mousemove"},
+ {"type": "mousedown"},
+ {"type": "mouseup"},
+ {"type": "click"},
+ ]
+ else:
+ expected = [
+ {"type": "mousemove"},
+ {"type": "mousedown"},
+ {"type": "contextmenu"},
+ {"type": "mouseup"},
+ ]
+ defaults = {
+ "altKey": False,
+ "metaKey": False,
+ "shiftKey": False,
+ "ctrlKey": False
+ }
+ for e in expected:
+ e.update(defaults)
+ if e["type"] != "mousemove":
+ e[prop] = True
+ filtered_events = [filter_dict(e, expected[0]) for e in get_events(session)]
+ assert expected == filtered_events
+
+
+def test_release_control_click(session, key_reporter, key_chain, mouse_chain):
+ # The context menu stays visible during subsequent tests so let's not
+ # display it in the first place.
+ session.execute_script("""
+ var keyReporter = document.getElementById("keys");
+ document.addEventListener("contextmenu", function(e) {
+ e.preventDefault();
+ });
+ """)
+ key_chain \
+ .pause(0) \
+ .key_down(Keys.CONTROL)
+ mouse_chain \
+ .pointer_move(0, 0, origin=key_reporter) \
+ .pointer_down()
+ session.actions.perform([key_chain.dict, mouse_chain.dict])
+ session.execute_script("""
+ var keyReporter = document.getElementById("keys");
+ keyReporter.addEventListener("mousedown", recordPointerEvent);
+ keyReporter.addEventListener("mouseup", recordPointerEvent);
+ resetEvents();
+ """)
+ session.actions.release()
+ expected = [
+ {"type": "mouseup"},
+ {"type": "keyup"},
+ ]
+ events = [filter_dict(e, expected[0]) for e in get_events(session)]
+ assert events == expected
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_dblclick.py b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_dblclick.py
new file mode 100644
index 0000000000..5be635a4d5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_dblclick.py
@@ -0,0 +1,33 @@
+import pytest
+
+from tests.perform_actions.support.refine import get_events
+from tests.support.asserts import assert_move_to_coordinates
+from tests.support.helpers import filter_dict
+
+
+@pytest.mark.parametrize("click_pause", [0, 200])
+def test_dblclick_at_coordinates(session, test_actions_page, mouse_chain, click_pause):
+ div_point = {
+ "x": 82,
+ "y": 187,
+ }
+ mouse_chain \
+ .pointer_move(div_point["x"], div_point["y"]) \
+ .click() \
+ .pause(click_pause) \
+ .click() \
+ .perform()
+ events = get_events(session)
+ assert_move_to_coordinates(div_point, "outer", events)
+ expected = [
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ {"type": "dblclick", "button": 0},
+ ]
+ assert len(events) == 8
+ filtered_events = [filter_dict(e, expected[0]) for e in events]
+ assert expected == filtered_events[1:]
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_modifier_click.py b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_modifier_click.py
new file mode 100644
index 0000000000..28c4086e0f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_modifier_click.py
@@ -0,0 +1,85 @@
+import pytest
+
+from tests.perform_actions.support.keys import Keys
+from tests.perform_actions.support.refine import get_events
+from tests.support.helpers import filter_dict
+
+
+@pytest.mark.parametrize("modifier, prop", [
+ (Keys.ALT, "altKey"),
+ (Keys.R_ALT, "altKey"),
+ (Keys.META, "metaKey"),
+ (Keys.R_META, "metaKey"),
+ (Keys.SHIFT, "shiftKey"),
+ (Keys.R_SHIFT, "shiftKey"),
+])
+def test_modifier_click(session, test_actions_page, key_chain, mouse_chain, modifier, prop):
+ key_chain \
+ .pause(200) \
+ .key_down(modifier) \
+ .pause(200) \
+ .key_up(modifier)
+ outer = session.find.css("#outer", all=False)
+ mouse_chain.click(element=outer)
+ session.actions.perform([key_chain.dict, mouse_chain.dict])
+ expected = [
+ {"type": "mousemove"},
+ {"type": "mousedown"},
+ {"type": "mouseup"},
+ {"type": "click"},
+ ]
+ defaults = {
+ "altKey": False,
+ "metaKey": False,
+ "shiftKey": False,
+ "ctrlKey": False
+ }
+ for e in expected:
+ e.update(defaults)
+ if e["type"] != "mousemove":
+ e[prop] = True
+ filtered_events = [filter_dict(e, expected[0]) for e in get_events(session)]
+ assert expected == filtered_events
+
+
+def test_many_modifiers_click(session, test_actions_page, key_chain, mouse_chain):
+ outer = session.find.css("#outer", all=False)
+ dblclick_timeout = 800
+ key_chain \
+ .pause(0) \
+ .key_down(Keys.ALT) \
+ .key_down(Keys.SHIFT) \
+ .pause(dblclick_timeout) \
+ .key_up(Keys.ALT) \
+ .key_up(Keys.SHIFT)
+ mouse_chain \
+ .pointer_move(0, 0, origin=outer) \
+ .pause(0) \
+ .pointer_down() \
+ .pointer_up() \
+ .pause(0) \
+ .pause(0) \
+ .pointer_down()
+ session.actions.perform([key_chain.dict, mouse_chain.dict])
+ expected = [
+ {"type": "mousemove"},
+ # shift and alt pressed
+ {"type": "mousedown"},
+ {"type": "mouseup"},
+ {"type": "click"},
+ # no modifiers pressed
+ {"type": "mousedown"},
+ ]
+ defaults = {
+ "altKey": False,
+ "metaKey": False,
+ "shiftKey": False,
+ "ctrlKey": False
+ }
+ for e in expected:
+ e.update(defaults)
+ for e in expected[1:4]:
+ e["shiftKey"] = True
+ e["altKey"] = True
+ events = [filter_dict(e, expected[0]) for e in get_events(session)]
+ assert events == expected
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_mouse.py b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_mouse.py
new file mode 100644
index 0000000000..bbf2dde993
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_mouse.py
@@ -0,0 +1,194 @@
+# META: timeout=long
+
+import pytest
+
+from webdriver.error import InvalidArgumentException, NoSuchWindowException, StaleElementReferenceException
+
+from tests.perform_actions.support.mouse import get_inview_center, get_viewport_rect
+from tests.perform_actions.support.refine import get_events
+from tests.support.asserts import assert_move_to_coordinates
+from tests.support.helpers import filter_dict
+from tests.support.sync import Poll
+
+
+def test_null_response_value(session, mouse_chain):
+ value = mouse_chain.click().perform()
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window, mouse_chain):
+ with pytest.raises(NoSuchWindowException):
+ mouse_chain.click().perform()
+
+
+def test_no_browsing_context(session, closed_frame, mouse_chain):
+ with pytest.raises(NoSuchWindowException):
+ mouse_chain.click().perform()
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, mouse_chain, as_frame):
+ element = stale_element("<input>", "input", as_frame=as_frame)
+
+ with pytest.raises(StaleElementReferenceException):
+ mouse_chain.click(element=element).perform()
+
+
+def test_click_at_coordinates(session, test_actions_page, mouse_chain):
+ div_point = {
+ "x": 82,
+ "y": 187,
+ }
+ mouse_chain \
+ .pointer_move(div_point["x"], div_point["y"], duration=1000) \
+ .click() \
+ .perform()
+ events = get_events(session)
+ assert len(events) == 4
+ assert_move_to_coordinates(div_point, "outer", events)
+ for e in events:
+ if e["type"] != "mousedown":
+ assert e["buttons"] == 0
+ assert e["button"] == 0
+ expected = [
+ {"type": "mousedown", "buttons": 1},
+ {"type": "mouseup", "buttons": 0},
+ {"type": "click", "buttons": 0},
+ ]
+ filtered_events = [filter_dict(e, expected[0]) for e in events]
+ assert expected == filtered_events[1:]
+
+
+def test_context_menu_at_coordinates(session, test_actions_page, mouse_chain):
+ div_point = {
+ "x": 82,
+ "y": 187,
+ }
+ mouse_chain \
+ .pointer_move(div_point["x"], div_point["y"]) \
+ .pointer_down(button=2) \
+ .pointer_up(button=2) \
+ .perform()
+ events = get_events(session)
+ expected = [
+ {"type": "mousedown", "button": 2},
+ {"type": "contextmenu", "button": 2},
+ ]
+ assert len(events) == 4
+ filtered_events = [filter_dict(e, expected[0]) for e in events]
+ mousedown_contextmenu_events = [
+ x for x in filtered_events
+ if x["type"] in ["mousedown", "contextmenu"]
+ ]
+ assert expected == mousedown_contextmenu_events
+
+
+def test_click_element_center(session, test_actions_page, mouse_chain):
+ outer = session.find.css("#outer", all=False)
+ center = get_inview_center(outer.rect, get_viewport_rect(session))
+ mouse_chain.click(element=outer).perform()
+ events = get_events(session)
+ assert len(events) == 4
+ event_types = [e["type"] for e in events]
+ assert ["mousemove", "mousedown", "mouseup", "click"] == event_types
+ for e in events:
+ if e["type"] != "mousemove":
+ assert e["pageX"] == pytest.approx(center["x"], abs=1.0)
+ assert e["pageY"] == pytest.approx(center["y"], abs=1.0)
+ assert e["target"] == "outer"
+
+
+def test_click_navigation(session, url, inline):
+ destination = url("/webdriver/tests/actions/support/test_actions_wdspec.html")
+ start = inline("<a href=\"{}\" id=\"link\">destination</a>".format(destination))
+
+ def click(link):
+ mouse_chain = session.actions.sequence(
+ "pointer", "pointer_id", {"pointerType": "mouse"})
+ mouse_chain.click(element=link).perform()
+
+ session.url = start
+ error_message = "Did not navigate to %s" % destination
+
+ click(session.find.css("#link", all=False))
+ Poll(session, message=error_message).until(lambda s: s.url == destination)
+ # repeat steps to check behaviour after document unload
+ session.url = start
+ click(session.find.css("#link", all=False))
+ Poll(session, message=error_message).until(lambda s: s.url == destination)
+
+
+@pytest.mark.parametrize("drag_duration", [0, 300, 800])
+@pytest.mark.parametrize("dx, dy", [
+ (20, 0), (0, 15), (10, 15), (-20, 0), (10, -15), (-10, -15)
+])
+def test_drag_and_drop(session,
+ test_actions_page,
+ mouse_chain,
+ dx,
+ dy,
+ drag_duration):
+ drag_target = session.find.css("#dragTarget", all=False)
+ initial_rect = drag_target.rect
+ initial_center = get_inview_center(initial_rect, get_viewport_rect(session))
+ # Conclude chain with extra move to allow time for last queued
+ # coordinate-update of drag_target and to test that drag_target is "dropped".
+ mouse_chain \
+ .pointer_move(0, 0, origin=drag_target) \
+ .pointer_down() \
+ .pointer_move(dx, dy, duration=drag_duration, origin="pointer") \
+ .pointer_up() \
+ .pointer_move(80, 50, duration=100, origin="pointer") \
+ .perform()
+ # mouseup that ends the drag is at the expected destination
+ e = get_events(session)[1]
+ assert e["type"] == "mouseup"
+ assert e["pageX"] == pytest.approx(initial_center["x"] + dx, abs=1.0)
+ assert e["pageY"] == pytest.approx(initial_center["y"] + dy, abs=1.0)
+ # check resulting location of the dragged element
+ final_rect = drag_target.rect
+ assert initial_rect["x"] + dx == final_rect["x"]
+ assert initial_rect["y"] + dy == final_rect["y"]
+
+
+@pytest.mark.parametrize("drag_duration", [0, 300, 800])
+def test_drag_and_drop_with_draggable_element(session_new_window,
+ test_actions_page,
+ mouse_chain,
+ drag_duration):
+ new_session = session_new_window
+ drag_target = new_session.find.css("#draggable", all=False)
+ drop_target = new_session.find.css("#droppable", all=False)
+ # Conclude chain with extra move to allow time for last queued
+ # coordinate-update of drag_target and to test that drag_target is "dropped".
+ mouse_chain \
+ .pointer_move(0, 0, origin=drag_target) \
+ .pointer_down() \
+ .pointer_move(50,
+ 25,
+ duration=drag_duration,
+ origin=drop_target) \
+ .pointer_up() \
+ .pointer_move(80, 50, duration=100, origin="pointer") \
+ .perform()
+ # mouseup that ends the drag is at the expected destination
+ e = get_events(new_session)
+ assert len(e) >= 5
+ assert e[1]["type"] == "dragstart", "Events captured were {}".format(e)
+ assert e[2]["type"] == "dragover", "Events captured were {}".format(e)
+ drag_events_captured = [
+ ev["type"] for ev in e if ev["type"].startswith("drag") or ev["type"].startswith("drop")
+ ]
+ assert "dragend" in drag_events_captured
+ assert "dragenter" in drag_events_captured
+ assert "dragleave" in drag_events_captured
+ assert "drop" in drag_events_captured
+
+
+@pytest.mark.parametrize("missing", ["x", "y"])
+def test_missing_coordinates(session, test_actions_page, mouse_chain, missing):
+ outer = session.find.css("#outer", all=False)
+ actions = mouse_chain.pointer_move(x=0, y=0, origin=outer)
+ del actions._actions[-1][missing]
+ with pytest.raises(InvalidArgumentException):
+ actions.perform()
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_origin.py b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_origin.py
new file mode 100644
index 0000000000..c4e5db7a00
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_origin.py
@@ -0,0 +1,131 @@
+import pytest
+
+from webdriver import MoveTargetOutOfBoundsException
+
+from tests.perform_actions.support.mouse import get_inview_center, get_viewport_rect
+
+
+@pytest.fixture
+def origin_doc(inline):
+ def origin_doc(inner_style, outer_style=""):
+ return inline("""
+ <div id="outer" style="{1}"
+ onmousemove="window.coords = {{x: event.clientX, y: event.clientY}}">
+ <div id="inner" style="{0}"></div>
+ </div>
+ """.format(inner_style, outer_style))
+ return origin_doc
+
+
+def get_click_coordinates(session):
+ return session.execute_script("return window.coords;")
+
+
+def test_viewport_inside(session, mouse_chain, origin_doc):
+ point = {"x": 50, "y": 50}
+
+ session.url = origin_doc("width: 100px; height: 50px; background: green;")
+ mouse_chain \
+ .pointer_move(point["x"], point["y"], origin="viewport") \
+ .perform()
+
+ click_coords = session.execute_script("return window.coords;")
+ assert click_coords["x"] == pytest.approx(point["x"], abs = 1.0)
+ assert click_coords["y"] == pytest.approx(point["y"], abs = 1.0)
+
+
+def test_viewport_outside(session, mouse_chain):
+ with pytest.raises(MoveTargetOutOfBoundsException):
+ mouse_chain \
+ .pointer_move(-50, -50, origin="viewport") \
+ .perform()
+
+
+def test_pointer_inside(session, mouse_chain, origin_doc):
+ start_point = {"x": 50, "y": 50}
+ offset = {"x": 10, "y": 5}
+
+ session.url = origin_doc("width: 100px; height: 50px; background: green;")
+ mouse_chain \
+ .pointer_move(start_point["x"], start_point["y"]) \
+ .pointer_move(offset["x"], offset["y"], origin="pointer") \
+ .perform()
+
+ click_coords = session.execute_script("return window.coords;")
+ assert click_coords["x"] == pytest.approx(start_point["x"] + offset["x"], abs = 1.0)
+ assert click_coords["y"] == pytest.approx(start_point["y"] + offset["y"], abs = 1.0)
+
+
+def test_pointer_outside(session, mouse_chain):
+ with pytest.raises(MoveTargetOutOfBoundsException):
+ mouse_chain \
+ .pointer_move(-50, -50, origin="pointer") \
+ .perform()
+
+
+def test_element_center_point(session, mouse_chain, origin_doc):
+ session.url = origin_doc("width: 100px; height: 50px; background: green;")
+ elem = session.find.css("#inner", all=False)
+ center = get_inview_center(elem.rect, get_viewport_rect(session))
+
+ mouse_chain \
+ .pointer_move(0, 0, origin=elem) \
+ .perform()
+
+ click_coords = get_click_coordinates(session)
+ assert click_coords["x"] == pytest.approx(center["x"], abs = 1.0)
+ assert click_coords["y"] == pytest.approx(center["y"], abs = 1.0)
+
+
+def test_element_center_point_with_offset(session, mouse_chain, origin_doc):
+ session.url = origin_doc("width: 100px; height: 50px; background: green;")
+ elem = session.find.css("#inner", all=False)
+ center = get_inview_center(elem.rect, get_viewport_rect(session))
+
+ mouse_chain \
+ .pointer_move(10, 15, origin=elem) \
+ .perform()
+
+ click_coords = get_click_coordinates(session)
+ assert click_coords["x"] == pytest.approx(center["x"] + 10, abs = 1.0)
+ assert click_coords["y"] == pytest.approx(center["y"] + 15, abs = 1.0)
+
+
+def test_element_in_view_center_point_partly_visible(session, mouse_chain, origin_doc):
+ session.url = origin_doc("""width: 100px; height: 50px; background: green;
+ position: relative; left: -50px; top: -25px;""")
+ elem = session.find.css("#inner", all=False)
+ center = get_inview_center(elem.rect, get_viewport_rect(session))
+
+ mouse_chain \
+ .pointer_move(0, 0, origin=elem) \
+ .perform()
+
+ click_coords = get_click_coordinates(session)
+ assert click_coords["x"] == pytest.approx(center["x"], abs = 1.0)
+ assert click_coords["y"] == pytest.approx(center["y"], abs = 1.0)
+
+
+def test_element_larger_than_viewport(session, mouse_chain, origin_doc):
+ session.url = origin_doc("width: 300vw; height: 300vh; background: green;")
+ elem = session.find.css("#inner", all=False)
+ center = get_inview_center(elem.rect, get_viewport_rect(session))
+
+ mouse_chain \
+ .pointer_move(0, 0, origin=elem) \
+ .perform()
+
+ click_coords = get_click_coordinates(session)
+ assert click_coords["x"] == pytest.approx(center["x"], abs = 1.0)
+ assert click_coords["y"] == pytest.approx(center["y"], abs = 1.0)
+
+
+def test_element_outside_of_view_port(session, mouse_chain, origin_doc):
+ session.url = origin_doc("""width: 100px; height: 50px; background: green;
+ position: relative; left: -200px; top: -100px;""")
+ elem = session.find.css("#inner", all=False)
+
+ with pytest.raises(MoveTargetOutOfBoundsException):
+ mouse_chain \
+ .pointer_move(0, 0, origin=elem) \
+ .perform()
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_pause_dblclick.py b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_pause_dblclick.py
new file mode 100644
index 0000000000..d46178a1d6
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_pause_dblclick.py
@@ -0,0 +1,53 @@
+from tests.perform_actions.support.mouse import get_inview_center, get_viewport_rect
+from tests.perform_actions.support.refine import get_events
+from tests.support.helpers import filter_dict
+
+_DBLCLICK_INTERVAL = 640
+
+
+def test_dblclick_with_pause_after_second_pointerdown(session, test_actions_page, mouse_chain):
+ outer = session.find.css("#outer", all=False)
+ center = get_inview_center(outer.rect, get_viewport_rect(session))
+ mouse_chain \
+ .pointer_move(int(center["x"]), int(center["y"])) \
+ .click() \
+ .pointer_down() \
+ .pause(_DBLCLICK_INTERVAL + 10) \
+ .pointer_up() \
+ .perform()
+ events = get_events(session)
+ expected = [
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ {"type": "dblclick", "button": 0},
+ ]
+ assert len(events) == 8
+ filtered_events = [filter_dict(e, expected[0]) for e in events]
+ assert expected == filtered_events[1:]
+
+
+def test_no_dblclick(session, test_actions_page, mouse_chain):
+ outer = session.find.css("#outer", all=False)
+ center = get_inview_center(outer.rect, get_viewport_rect(session))
+ mouse_chain \
+ .pointer_move(int(center["x"]), int(center["y"])) \
+ .click() \
+ .pause(_DBLCLICK_INTERVAL + 10) \
+ .click() \
+ .perform()
+ events = get_events(session)
+ expected = [
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ ]
+ assert len(events) == 7
+ filtered_events = [filter_dict(e, expected[0]) for e in events]
+ assert expected == filtered_events[1:]
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_pen.py b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_pen.py
new file mode 100644
index 0000000000..6637f03153
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_pen.py
@@ -0,0 +1,72 @@
+# META: timeout=long
+
+import pytest
+
+from webdriver.error import NoSuchWindowException, StaleElementReferenceException
+
+from tests.perform_actions.support.mouse import get_inview_center, get_viewport_rect
+from tests.perform_actions.support.refine import get_events
+
+
+def test_null_response_value(session, pen_chain):
+ value = pen_chain.click().perform()
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window, pen_chain):
+ with pytest.raises(NoSuchWindowException):
+ pen_chain.click().perform()
+
+
+def test_no_browsing_context(session, closed_frame, pen_chain):
+ with pytest.raises(NoSuchWindowException):
+ pen_chain.click().perform()
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, pen_chain, as_frame):
+ element = stale_element("<input>", "input", as_frame=as_frame)
+
+ with pytest.raises(StaleElementReferenceException):
+ pen_chain.click(element=element).perform()
+
+
+def test_pen_pointer_properties(session, test_actions_pointer_page, pen_chain):
+ pointerArea = session.find.css("#pointerArea", all=False)
+ center = get_inview_center(pointerArea.rect, get_viewport_rect(session))
+ pen_chain.pointer_move(0, 0, origin=pointerArea) \
+ .pointer_down(pressure=0.36, tilt_x=-72, tilt_y=9, twist=86) \
+ .pointer_move(10, 10, origin=pointerArea) \
+ .pointer_up() \
+ .pointer_move(80, 50, origin=pointerArea) \
+ .perform()
+ events = get_events(session)
+ assert len(events) == 10
+ event_types = [e["type"] for e in events]
+ assert ["pointerover", "pointerenter", "pointermove", "pointerdown",
+ "pointerover", "pointerenter", "pointermove", "pointerup",
+ "pointerout", "pointerleave"] == event_types
+ assert events[3]["type"] == "pointerdown"
+ assert events[3]["pageX"] == pytest.approx(center["x"], abs=1.0)
+ assert events[3]["pageY"] == pytest.approx(center["y"], abs=1.0)
+ assert events[3]["target"] == "pointerArea"
+ assert events[3]["pointerType"] == "pen"
+ # The default value of width and height for mouse and pen inputs is 1
+ assert round(events[3]["width"], 2) == 1
+ assert round(events[3]["height"], 2) == 1
+ assert round(events[3]["pressure"], 2) == 0.36
+ assert events[3]["tiltX"] == -72
+ assert events[3]["tiltY"] == 9
+ assert events[3]["twist"] == 86
+ assert events[6]["type"] == "pointermove"
+ assert events[6]["pageX"] == pytest.approx(center["x"]+10, abs=1.0)
+ assert events[6]["pageY"] == pytest.approx(center["y"]+10, abs=1.0)
+ assert events[6]["target"] == "pointerArea"
+ assert events[6]["pointerType"] == "pen"
+ assert round(events[6]["width"], 2) == 1
+ assert round(events[6]["height"], 2) == 1
+ # The default value of pressure for all inputs is 0.5, other properties are 0
+ assert round(events[6]["pressure"], 2) == 0.5
+ assert events[6]["tiltX"] == 0
+ assert events[6]["tiltY"] == 0
+ assert events[6]["twist"] == 0
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_touch.py b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_touch.py
new file mode 100644
index 0000000000..9e76273cbb
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_touch.py
@@ -0,0 +1,90 @@
+# META: timeout=long
+
+import pytest
+
+from webdriver.error import NoSuchWindowException, StaleElementReferenceException
+from tests.perform_actions.support.mouse import get_inview_center, get_viewport_rect
+from tests.perform_actions.support.refine import get_events
+
+
+def test_null_response_value(session, touch_chain):
+ value = touch_chain.click().perform()
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window, touch_chain):
+ with pytest.raises(NoSuchWindowException):
+ touch_chain.click().perform()
+
+
+def test_no_browsing_context(session, closed_frame, touch_chain):
+ with pytest.raises(NoSuchWindowException):
+ touch_chain.click().perform()
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, touch_chain, as_frame):
+ element = stale_element("<input>", "input", as_frame=as_frame)
+
+ with pytest.raises(StaleElementReferenceException):
+ touch_chain.click(element=element).perform()
+
+
+def test_touch_pointer_properties(session, test_actions_pointer_page, touch_chain):
+ pointerArea = session.find.css("#pointerArea", all=False)
+ center = get_inview_center(pointerArea.rect, get_viewport_rect(session))
+ touch_chain.pointer_move(0, 0, origin=pointerArea) \
+ .pointer_down(width=23, height=31, pressure=0.78, tilt_x=21, tilt_y=-8, twist=355) \
+ .pointer_move(10, 10, origin=pointerArea, width=39, height=35, pressure=0.91, tilt_x=-19, tilt_y=62, twist=345) \
+ .pointer_up() \
+ .pointer_move(80, 50, origin=pointerArea) \
+ .perform()
+ events = get_events(session)
+ assert len(events) == 7
+ event_types = [e["type"] for e in events]
+ assert ["pointerover", "pointerenter", "pointerdown", "pointermove",
+ "pointerup", "pointerout", "pointerleave"] == event_types
+ assert events[2]["type"] == "pointerdown"
+ assert events[2]["pageX"] == pytest.approx(center["x"], abs=1.0)
+ assert events[2]["pageY"] == pytest.approx(center["y"], abs=1.0)
+ assert events[2]["target"] == "pointerArea"
+ assert events[2]["pointerType"] == "touch"
+ assert round(events[2]["width"], 2) == 23
+ assert round(events[2]["height"], 2) == 31
+ assert round(events[2]["pressure"], 2) == 0.78
+ assert events[3]["type"] == "pointermove"
+ assert events[3]["pageX"] == pytest.approx(center["x"]+10, abs=1.0)
+ assert events[3]["pageY"] == pytest.approx(center["y"]+10, abs=1.0)
+ assert events[3]["target"] == "pointerArea"
+ assert events[3]["pointerType"] == "touch"
+ assert round(events[3]["width"], 2) == 39
+ assert round(events[3]["height"], 2) == 35
+ assert round(events[3]["pressure"], 2) == 0.91
+
+
+def test_touch_pointer_properties_tilt_twist(session, test_actions_pointer_page, touch_chain):
+ # This test only covers the tilt/twist properties which are
+ # more specific to pen-type pointers, but which the spec allows
+    # for generic touch pointers. Separating this out gives better
+ # coverage of the basic properties in test_touch_pointer_properties
+ pointerArea = session.find.css("#pointerArea", all=False)
+ center = get_inview_center(pointerArea.rect, get_viewport_rect(session))
+ touch_chain.pointer_move(0, 0, origin=pointerArea) \
+ .pointer_down(width=23, height=31, pressure=0.78, tilt_x=21, tilt_y=-8, twist=355) \
+ .pointer_move(10, 10, origin=pointerArea, width=39, height=35, pressure=0.91, tilt_x=-19, tilt_y=62, twist=345) \
+ .pointer_up() \
+ .pointer_move(80, 50, origin=pointerArea) \
+ .perform()
+ events = get_events(session)
+ assert len(events) == 7
+ event_types = [e["type"] for e in events]
+ assert ["pointerover", "pointerenter", "pointerdown", "pointermove",
+ "pointerup", "pointerout", "pointerleave"] == event_types
+ assert events[2]["type"] == "pointerdown"
+ assert events[2]["tiltX"] == 21
+ assert events[2]["tiltY"] == -8
+ assert events[2]["twist"] == 355
+ assert events[3]["type"] == "pointermove"
+ assert events[3]["tiltX"] == -19
+ assert events[3]["tiltY"] == 62
+ assert events[3]["twist"] == 345
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_tripleclick.py b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_tripleclick.py
new file mode 100644
index 0000000000..fff70b8fa6
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/pointer_tripleclick.py
@@ -0,0 +1,36 @@
+import math
+
+from tests.perform_actions.support.refine import get_events
+from tests.support.asserts import assert_move_to_coordinates
+from tests.support.helpers import filter_dict
+
+lots_of_text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor "\
+ "incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud "\
+ "exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat."
+
+
+def test_tripleclick_at_coordinates(session, mouse_chain, inline):
+ """
+ This test does a triple click on a coordinate. On desktop platforms
+ this will select a paragraph. On mobile this will not have the same
+ desired outcome as taps are handled differently on mobile.
+ """
+ session.url = inline("""<div>
+ {}
+ </div>""".format(lots_of_text))
+ div = session.find.css("div", all=False)
+ div_rect = div.rect
+ div_centre = {
+ "x": math.floor(div_rect["x"] + div_rect["width"] / 2),
+ "y": math.floor(div_rect["y"] + div_rect["height"] / 2)
+ }
+ mouse_chain \
+ .pointer_move(div_centre["x"], div_centre["y"]) \
+ .click() \
+ .click() \
+ .click() \
+ .perform()
+
+ actual_text = session.execute_script("return document.getSelection().toString();")
+
+ assert actual_text == lots_of_text
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/sequence.py b/testing/web-platform/tests/webdriver/tests/perform_actions/sequence.py
new file mode 100644
index 0000000000..3536abeb12
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/sequence.py
@@ -0,0 +1,9 @@
+# META: timeout=long
+
+from tests.perform_actions.support.refine import get_events, get_keys
+
+
+def test_perform_no_actions_send_no_events(session, key_reporter, key_chain):
+ key_chain.perform()
+ assert len(get_keys(key_reporter)) == 0
+ assert len(get_events(session)) == 0
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/support/__init__.py b/testing/web-platform/tests/webdriver/tests/perform_actions/support/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/support/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/support/keys.py b/testing/web-platform/tests/webdriver/tests/perform_actions/support/keys.py
new file mode 100644
index 0000000000..e835a6a596
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/support/keys.py
@@ -0,0 +1,905 @@
+# Licensed to the Software Freedom Conservancy (SFC) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The SFC licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""The Keys implementation."""
+
+import sys
+
+from collections import OrderedDict
+from inspect import getmembers
+
+class Keys(object):
+ """
+ Set of special keys codes.
+
+ See also https://w3c.github.io/webdriver/#keyboard-actions
+ """
+
+ NULL = u"\ue000"
+ CANCEL = u"\ue001" # ^break
+ HELP = u"\ue002"
+ BACKSPACE = u"\ue003"
+ TAB = u"\ue004"
+ CLEAR = u"\ue005"
+ RETURN = u"\ue006"
+ ENTER = u"\ue007"
+ SHIFT = u"\ue008"
+ CONTROL = u"\ue009"
+ ALT = u"\ue00a"
+ PAUSE = u"\ue00b"
+ ESCAPE = u"\ue00c"
+ SPACE = u"\ue00d"
+ PAGE_UP = u"\ue00e"
+ PAGE_DOWN = u"\ue00f"
+ END = u"\ue010"
+ HOME = u"\ue011"
+ LEFT = u"\ue012"
+ UP = u"\ue013"
+ RIGHT = u"\ue014"
+ DOWN = u"\ue015"
+ INSERT = u"\ue016"
+ DELETE = u"\ue017"
+ SEMICOLON = u"\ue018"
+ EQUALS = u"\ue019"
+
+ NUMPAD0 = u"\ue01a" # number pad keys
+ NUMPAD1 = u"\ue01b"
+ NUMPAD2 = u"\ue01c"
+ NUMPAD3 = u"\ue01d"
+ NUMPAD4 = u"\ue01e"
+ NUMPAD5 = u"\ue01f"
+ NUMPAD6 = u"\ue020"
+ NUMPAD7 = u"\ue021"
+ NUMPAD8 = u"\ue022"
+ NUMPAD9 = u"\ue023"
+ MULTIPLY = u"\ue024"
+ ADD = u"\ue025"
+ SEPARATOR = u"\ue026"
+ SUBTRACT = u"\ue027"
+ DECIMAL = u"\ue028"
+ DIVIDE = u"\ue029"
+
+ F1 = u"\ue031" # function keys
+ F2 = u"\ue032"
+ F3 = u"\ue033"
+ F4 = u"\ue034"
+ F5 = u"\ue035"
+ F6 = u"\ue036"
+ F7 = u"\ue037"
+ F8 = u"\ue038"
+ F9 = u"\ue039"
+ F10 = u"\ue03a"
+ F11 = u"\ue03b"
+ F12 = u"\ue03c"
+
+ META = u"\ue03d"
+
+ # More keys from webdriver spec
+ ZENKAKUHANKAKU = u"\uE040"
+ R_SHIFT = u"\uE050"
+ R_CONTROL = u"\uE051"
+ R_ALT = u"\uE052"
+ R_META = u"\uE053"
+ R_PAGEUP = u"\uE054"
+ R_PAGEDOWN = u"\uE055"
+ R_END = u"\uE056"
+ R_HOME = u"\uE057"
+ R_ARROWLEFT = u"\uE058"
+ R_ARROWUP = u"\uE059"
+ R_ARROWRIGHT = u"\uE05A"
+ R_ARROWDOWN = u"\uE05B"
+ R_INSERT = u"\uE05C"
+ R_DELETE = u"\uE05D"
+
+
+ALL_KEYS = getmembers(Keys, lambda x: type(x) == str)
+
+ALL_EVENTS = OrderedDict(
+ [
+ ("ADD", OrderedDict(
+ [
+ ("code", "NumpadAdd"),
+ ("ctrl", False),
+ ("key", "+"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue025")
+ ]
+ )),
+ ("ALT", OrderedDict(
+ [
+ ("code", "AltLeft"),
+ ("ctrl", False),
+ ("key", "Alt"),
+ ("location", 1),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue00a")
+ ]
+ )),
+ ("BACKSPACE", OrderedDict(
+ [
+ ("code", "Backspace"),
+ ("ctrl", False),
+ ("key", "Backspace"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue003")
+ ]
+ )),
+ ("CANCEL", OrderedDict(
+ [
+ ("code", ""),
+ ("ctrl", False),
+ ("key", "Cancel"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue001")
+ ]
+ )),
+ ("CLEAR", OrderedDict(
+ [
+ ("code", ""),
+ ("ctrl", False),
+ ("key", "Clear"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue005")
+ ]
+ )),
+ ("CONTROL", OrderedDict(
+ [
+ ("code", "ControlLeft"),
+ ("ctrl", True),
+ ("key", "Control"),
+ ("location", 1),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue009")
+ ]
+ )),
+ ("DECIMAL", OrderedDict(
+ [
+ ("code", "NumpadDecimal"),
+ ("ctrl", False),
+ ("key", "."),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue028")
+ ]
+ )),
+ ("DELETE", OrderedDict(
+ [
+ ("code", "Delete"),
+ ("ctrl", False),
+ ("key", "Delete"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue017")
+ ]
+ )),
+ ("DIVIDE", OrderedDict(
+ [
+ ("code", "NumpadDivide"),
+ ("ctrl", False),
+ ("key", "/"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue029")
+ ]
+ )),
+ ("DOWN", OrderedDict(
+ [
+ ("code", "ArrowDown"),
+ ("ctrl", False),
+ ("key", "ArrowDown"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue015")
+ ]
+ )),
+ ("END", OrderedDict(
+ [
+ ("code", "End"),
+ ("ctrl", False),
+ ("key", "End"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue010")
+ ]
+ )),
+ ("ENTER", OrderedDict(
+ [
+ ("code", "NumpadEnter"),
+ ("ctrl", False),
+ ("key", "Enter"),
+ ("location", 1),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue007")
+ ]
+ )),
+ ("EQUALS", OrderedDict(
+ [
+ ("code", ""),
+ ("ctrl", False),
+ ("key", "="),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue019")
+ ]
+ )),
+ ("ESCAPE", OrderedDict(
+ [
+ ("code", "Escape"),
+ ("ctrl", False),
+ ("key", "Escape"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue00c")
+ ]
+ )),
+ ("F1", OrderedDict(
+ [
+ ("code", "F1"),
+ ("ctrl", False),
+ ("key", "F1"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue031")
+ ]
+ )),
+ ("F10", OrderedDict(
+ [
+ ("code", "F10"),
+ ("ctrl", False),
+ ("key", "F10"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue03a")
+ ]
+ )),
+ ("F11", OrderedDict(
+ [
+ ("code", "F11"),
+ ("ctrl", False),
+ ("key", "F11"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue03b")
+ ]
+ )),
+ ("F12", OrderedDict(
+ [
+ ("code", "F12"),
+ ("ctrl", False),
+ ("key", "F12"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue03c")
+ ]
+ )),
+ ("F2", OrderedDict(
+ [
+ ("code", "F2"),
+ ("ctrl", False),
+ ("key", "F2"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue032")
+ ]
+ )),
+ ("F3", OrderedDict(
+ [
+ ("code", "F3"),
+ ("ctrl", False),
+ ("key", "F3"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue033")
+ ]
+ )),
+ ("F4", OrderedDict(
+ [
+ ("code", "F4"),
+ ("ctrl", False),
+ ("key", "F4"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue034")
+ ]
+ )),
+ ("F5", OrderedDict(
+ [
+ ("code", "F5"),
+ ("ctrl", False),
+ ("key", "F5"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue035")
+ ]
+ )),
+ ("F6", OrderedDict(
+ [
+ ("code", "F6"),
+ ("ctrl", False),
+ ("key", "F6"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue036")
+ ]
+ )),
+ ("F7", OrderedDict(
+ [
+ ("code", "F7"),
+ ("ctrl", False),
+ ("key", "F7"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue037")
+ ]
+ )),
+ ("F8", OrderedDict(
+ [
+ ("code", "F8"),
+ ("ctrl", False),
+ ("key", "F8"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue038")
+ ]
+ )),
+ ("F9", OrderedDict(
+ [
+ ("code", "F9"),
+ ("ctrl", False),
+ ("key", "F9"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue039")
+ ]
+ )),
+ ("HELP", OrderedDict(
+ [
+ ("code", "Help"),
+ ("ctrl", False),
+ ("key", "Help"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue002")
+ ]
+ )),
+ ("HOME", OrderedDict(
+ [
+ ("code", "Home"),
+ ("ctrl", False),
+ ("key", "Home"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue011")
+ ]
+ )),
+ ("INSERT", OrderedDict(
+ [
+ ("code", "Insert"),
+ ("ctrl", False),
+ ("key", "Insert"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue016")
+ ]
+ )),
+ ("LEFT", OrderedDict(
+ [
+ ("code", "ArrowLeft"),
+ ("ctrl", False),
+ ("key", "ArrowLeft"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue012")
+ ]
+ )),
+ ("META", OrderedDict(
+ [
+ ("code", "OSLeft"),
+ ("ctrl", False),
+ ("key", "Meta"),
+ ("location", 1),
+ ("meta", True),
+ ("shift", False),
+ ("value", u"\ue03d")
+ ]
+ )),
+ ("MULTIPLY", OrderedDict(
+ [
+ ("code", "NumpadMultiply"),
+ ("ctrl", False),
+ ("key", "*"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue024")
+ ]
+ )),
+ ("NULL", OrderedDict(
+ [
+ ("code", ""),
+ ("ctrl", False),
+ ("key", "Unidentified"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue000")
+ ]
+ )),
+ ("NUMPAD0", OrderedDict(
+ [
+ ("code", "Numpad0"),
+ ("ctrl", False),
+ ("key", "0"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue01a")
+ ]
+ )),
+ ("NUMPAD1", OrderedDict(
+ [
+ ("code", "Numpad1"),
+ ("ctrl", False),
+ ("key", "1"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue01b")
+ ]
+ )),
+ ("NUMPAD2", OrderedDict(
+ [
+ ("code", "Numpad2"),
+ ("ctrl", False),
+ ("key", "2"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue01c")
+ ]
+ )),
+ ("NUMPAD3", OrderedDict(
+ [
+ ("code", "Numpad3"),
+ ("ctrl", False),
+ ("key", "3"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue01d")
+ ]
+ )),
+ ("NUMPAD4", OrderedDict(
+ [
+ ("code", "Numpad4"),
+ ("ctrl", False),
+ ("key", "4"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue01e")
+ ]
+ )),
+ ("NUMPAD5", OrderedDict(
+ [
+ ("code", "Numpad5"),
+ ("ctrl", False),
+ ("key", "5"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue01f")
+ ]
+ )),
+ ("NUMPAD6", OrderedDict(
+ [
+ ("code", "Numpad6"),
+ ("ctrl", False),
+ ("key", "6"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue020")
+ ]
+ )),
+ ("NUMPAD7", OrderedDict(
+ [
+ ("code", "Numpad7"),
+ ("ctrl", False),
+ ("key", "7"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue021")
+ ]
+ )),
+ ("NUMPAD8", OrderedDict(
+ [
+ ("code", "Numpad8"),
+ ("ctrl", False),
+ ("key", "8"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue022")
+ ]
+ )),
+ ("NUMPAD9", OrderedDict(
+ [
+ ("code", "Numpad9"),
+ ("ctrl", False),
+ ("key", "9"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue023")
+ ]
+ )),
+ ("PAGE_DOWN", OrderedDict(
+ [
+ ("code", "PageDown"),
+ ("ctrl", False),
+ ("key", "PageDown"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue00f")
+ ]
+ )),
+ ("PAGE_UP", OrderedDict(
+ [
+ ("code", "PageUp"),
+ ("ctrl", False),
+ ("key", "PageUp"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue00e")
+ ]
+ )),
+ ("PAUSE", OrderedDict(
+ [
+ ("code", ""),
+ ("ctrl", False),
+ ("key", "Pause"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue00b")
+ ]
+ )),
+ ("RETURN", OrderedDict(
+ [
+ ("code", "Enter"),
+ ("ctrl", False),
+ ("key", "Enter"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue006")
+ ]
+ )),
+ ("RIGHT", OrderedDict(
+ [
+ ("code", "ArrowRight"),
+ ("ctrl", False),
+ ("key", "ArrowRight"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue014")
+ ]
+ )),
+ ("R_ALT", OrderedDict(
+ [
+ ("code", "AltRight"),
+ ("ctrl", False),
+ ("key", "Alt"),
+ ("location", 2),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue052")
+ ]
+ )),
+ ("R_ARROWDOWN", OrderedDict(
+ [
+ ("code", "Numpad2"),
+ ("ctrl", False),
+ ("key", "ArrowDown"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue05b")
+ ]
+ )),
+ ("R_ARROWLEFT", OrderedDict(
+ [
+ ("code", "Numpad4"),
+ ("ctrl", False),
+ ("key", "ArrowLeft"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue058")
+ ]
+ )),
+ ("R_ARROWRIGHT", OrderedDict(
+ [
+ ("code", "Numpad6"),
+ ("ctrl", False),
+ ("key", "ArrowRight"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue05a")
+ ]
+ )),
+ ("R_ARROWUP", OrderedDict(
+ [
+ ("code", "Numpad8"),
+ ("ctrl", False),
+ ("key", "ArrowUp"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue059")
+ ]
+ )),
+ ("R_CONTROL", OrderedDict(
+ [
+ ("code", "ControlRight"),
+ ("ctrl", True),
+ ("key", "Control"),
+ ("location", 2),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue051")
+ ]
+ )),
+ ("R_DELETE", OrderedDict(
+ [
+ ("code", "NumpadDecimal"),
+ ("ctrl", False),
+ ("key", "Delete"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue05d")
+ ]
+ )),
+ ("R_END", OrderedDict(
+ [
+ ("code", "Numpad1"),
+ ("ctrl", False),
+ ("key", "End"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue056")
+ ]
+ )),
+ ("R_HOME", OrderedDict(
+ [
+ ("code", "Numpad7"),
+ ("ctrl", False),
+ ("key", "Home"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue057")
+ ]
+ )),
+ ("R_INSERT", OrderedDict(
+ [
+ ("code", "Numpad0"),
+ ("ctrl", False),
+ ("key", "Insert"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue05c")
+ ]
+ )),
+ ("R_META", OrderedDict(
+ [
+ ("code", "OSRight"),
+ ("ctrl", False),
+ ("key", "Meta"),
+ ("location", 2),
+ ("meta", True),
+ ("shift", False),
+ ("value", u"\ue053")
+ ]
+ )),
+ ("R_PAGEDOWN", OrderedDict(
+ [
+ ("code", "Numpad3"),
+ ("ctrl", False),
+ ("key", "PageDown"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue055")
+ ]
+ )),
+ ("R_PAGEUP", OrderedDict(
+ [
+ ("code", "Numpad9"),
+ ("ctrl", False),
+ ("key", "PageUp"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue054")
+ ]
+ )),
+ ("R_SHIFT", OrderedDict(
+ [
+ ("code", "ShiftRight"),
+ ("ctrl", False),
+ ("key", "Shift"),
+ ("location", 2),
+ ("meta", False),
+ ("shift", True),
+ ("value", u"\ue050")
+ ]
+ )),
+ ("SEMICOLON", OrderedDict(
+ [
+ ("code", ""),
+ ("ctrl", False),
+ ("key", ";"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue018")
+ ]
+ )),
+ ("SEPARATOR", OrderedDict(
+ [
+ ("code", "NumpadComma"),
+ ("ctrl", False),
+ ("key", ","),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue026")
+ ]
+ )),
+ ("SHIFT", OrderedDict(
+ [
+ ("code", "ShiftLeft"),
+ ("ctrl", False),
+ ("key", "Shift"),
+ ("location", 1),
+ ("meta", False),
+ ("shift", True),
+ ("value", u"\ue008")
+ ]
+ )),
+ ("SPACE", OrderedDict(
+ [
+ ("code", "Space"),
+ ("ctrl", False),
+ ("key", " "),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue00d")
+ ]
+ )),
+ ("SUBTRACT", OrderedDict(
+ [
+ ("code", "NumpadSubtract"),
+ ("ctrl", False),
+ ("key", "-"),
+ ("location", 3),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue027")
+ ]
+ )),
+ ("TAB", OrderedDict(
+ [
+ ("code", "Tab"),
+ ("ctrl", False),
+ ("key", "Tab"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue004")
+ ]
+ )),
+ ("UP", OrderedDict(
+ [
+ ("code", "ArrowUp"),
+ ("ctrl", False),
+ ("key", "ArrowUp"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue013")
+ ]
+ )),
+ ("ZENKAKUHANKAKU", OrderedDict(
+ [
+ ("code", ""),
+ ("ctrl", False),
+ ("key", "ZenkakuHankaku"),
+ ("location", 0),
+ ("meta", False),
+ ("shift", False),
+ ("value", u"\ue040")
+ ]
+ ))
+ ]
+)
+
+ALTERNATIVE_KEY_NAMES = {
+ "ADD": "Add",
+ "DECIMAL": "Decimal",
+ "DELETE": "Del",
+ "DIVIDE": "Divide",
+ "DOWN": "Down",
+ "ESCAPE": "Esc",
+ "LEFT": "Left",
+ "MULTIPLY": "Multiply",
+ "R_ARROWDOWN": "Down",
+ "R_ARROWLEFT": "Left",
+ "R_ARROWRIGHT": "Right",
+ "R_ARROWUP": "Up",
+ "R_DELETE": "Del",
+ "RIGHT": "Right",
+ "SEPARATOR": "Separator",
+ "SPACE": "Spacebar",
+ "SUBTRACT": "Subtract",
+ "UP": "Up",
+}
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/support/mouse.py b/testing/web-platform/tests/webdriver/tests/perform_actions/support/mouse.py
new file mode 100644
index 0000000000..b3672eb213
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/support/mouse.py
@@ -0,0 +1,26 @@
+def get_viewport_rect(session):
+ return session.execute_script("""
+ return {
+ height: window.innerHeight || document.documentElement.clientHeight,
+ width: window.innerWidth || document.documentElement.clientWidth,
+ };
+ """)
+
+
+def get_inview_center(elem_rect, viewport_rect):
+ x = {
+ "left": max(0, min(elem_rect["x"], elem_rect["x"] + elem_rect["width"])),
+ "right": min(viewport_rect["width"], max(elem_rect["x"],
+ elem_rect["x"] + elem_rect["width"])),
+ }
+
+ y = {
+ "top": max(0, min(elem_rect["y"], elem_rect["y"] + elem_rect["height"])),
+ "bottom": min(viewport_rect["height"], max(elem_rect["y"],
+ elem_rect["y"] + elem_rect["height"])),
+ }
+
+ return {
+ "x": (x["left"] + x["right"]) / 2,
+ "y": (y["top"] + y["bottom"]) / 2,
+ }
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/support/refine.py b/testing/web-platform/tests/webdriver/tests/perform_actions/support/refine.py
new file mode 100644
index 0000000000..35c962b9ec
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/support/refine.py
@@ -0,0 +1,29 @@
+def get_events(session):
+ """Return list of key events recorded in the test_keys_page fixture."""
+ events = session.execute_script("return allEvents.events;") or []
+ # `key` values in `allEvents` may be escaped (see `escapeSurrogateHalf` in
+ # test_keys_wdspec.html), so this converts them back into unicode literals.
+ for e in events:
+ # example: turn "U+d83d" (6 chars) into u"\ud83d" (1 char)
+ if "key" in e and e["key"].startswith(u"U+"):
+ key = e["key"]
+ hex_suffix = key[key.index("+") + 1:]
+ e["key"] = unichr(int(hex_suffix, 16))
+
+ # WebKit sets code as 'Unidentified' for unidentified key codes, but
+ # tests expect ''.
+ if "code" in e and e["code"] == "Unidentified":
+ e["code"] = ""
+ return events
+
+
+def get_keys(input_el):
+ """Get printable characters entered into `input_el`.
+
+ :param input_el: HTML input element.
+ """
+ rv = input_el.property("value")
+ if rv is None:
+ return ""
+ else:
+ return rv
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_pointer_wdspec.html b/testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_pointer_wdspec.html
new file mode 100644
index 0000000000..f1fd9b2da3
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_pointer_wdspec.html
@@ -0,0 +1,102 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+<head>
+ <title>Test Actions</title>
+ <style>
+ div { padding:0px; margin: 0px; }
+ #trackPointer { position: fixed; }
+ #resultContainer { width: 600px; height: 60px; }
+ .area { width: 100px; height: 50px; background-color: #ccc; }
+ </style>
+ <script>
+ "use strict";
+ var els = {};
+ var allEvents = { events: [] };
+ function displayMessage(message) {
+ document.getElementById("events").innerHTML = "<p>" + message + "</p>";
+ }
+
+ function appendMessage(message) {
+ document.getElementById("events").innerHTML += "<p>" + message + "</p>";
+ }
+
+ function recordPointerEvent(event) {
+ if (event.type === "contextmenu") {
+ event.preventDefault();
+ }
+ allEvents.events.push({
+ "type": event.type,
+ "button": event.button,
+ "buttons": event.buttons,
+ "pageX": event.pageX,
+ "pageY": event.pageY,
+ "ctrlKey": event.ctrlKey,
+ "metaKey": event.metaKey,
+ "altKey": event.altKey,
+ "shiftKey": event.shiftKey,
+ "target": event.target.id,
+ "pointerType": event.pointerType,
+ "width": event.width,
+ "height": event.height,
+ "pressure": event.pressure,
+ "tangentialPressure": event.tangentialPressure,
+ "tiltX": event.tiltX,
+ "tiltY": event.tiltY,
+ "twist": event.twist,
+ "altitudeAngle": event.altitudeAngle,
+ "azimuthAngle": event.azimuthAngle
+ });
+ appendMessage(event.type + " " +
+ "button: " + event.button + ", " +
+ "pageX: " + event.pageX + ", " +
+ "pageY: " + event.pageY + ", " +
+ "button: " + event.button + ", " +
+ "buttons: " + event.buttons + ", " +
+ "ctrlKey: " + event.ctrlKey + ", " +
+ "altKey: " + event.altKey + ", " +
+ "metaKey: " + event.metaKey + ", " +
+ "shiftKey: " + event.shiftKey + ", " +
+ "target id: " + event.target.id + ", " +
+ "pointerType: " + event.pointerType + ", " +
+ "width: " + event.width + ", " +
+ "height: " + event.height + ", " +
+ "pressure: " + event.pressure + ", " +
+ "tangentialPressure: " + event.tangentialPressure + ", " +
+ "tiltX: " + event.tiltX + ", " +
+ "tiltY: " + event.tiltY + ", " +
+ "twist: " + event.twist + ", " +
+ "altitudeAngle: " + event.altitudeAngle + ", " +
+ "azimuthAngle: " + event.azimuthAngle);
+ }
+
+ function resetEvents() {
+ allEvents.events.length = 0;
+ displayMessage("");
+ }
+
+ document.addEventListener("DOMContentLoaded", function() {
+ var pointerArea = document.getElementById("pointerArea");
+ pointerArea.addEventListener("pointerdown", recordPointerEvent);
+ pointerArea.addEventListener("pointermove", recordPointerEvent);
+ pointerArea.addEventListener("pointerup", recordPointerEvent);
+ pointerArea.addEventListener("pointerover", recordPointerEvent);
+ pointerArea.addEventListener("pointerenter", recordPointerEvent);
+ pointerArea.addEventListener("pointerout", recordPointerEvent);
+ pointerArea.addEventListener("pointerleave", recordPointerEvent);
+ });
+ </script>
+</head>
+<body>
+ <div id="trackPointer" class="block"></div>
+ <div>
+ <h2>PointerReporter</h2>
+ <div id="pointerArea" class="area">
+ </div>
+ </div>
+ <div id="resultContainer">
+ <h2>Events</h2>
+ <div id="events"></div>
+ </div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_scroll_wdspec.html b/testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_scroll_wdspec.html
new file mode 100644
index 0000000000..b6e281e581
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_scroll_wdspec.html
@@ -0,0 +1,103 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+<head>
+ <title>Test Actions</title>
+ <style>
+ div { padding:0px; margin: 0px; }
+ .area { width: 100px; height: 50px; background-color: #ccc; }
+ #scrollable { width: 100px; height: 100px; overflow: scroll; }
+ #scrollContent { width: 600px; height: 1000px; background-color: blue; }
+ #subframe { width: 100px; height: 100px; }
+ </style>
+ <script>
+ "use strict";
+ var els = {};
+ var allEvents = { events: [] };
+ function displayMessage(message) {
+ document.getElementById("events").innerHTML = "<p>" + message + "</p>";
+ }
+
+ function appendMessage(message) {
+ document.getElementById("events").innerHTML += "<p>" + message + "</p>";
+ }
+
+ function recordWheelEvent(event) {
+ allEvents.events.push({
+ "type": event.type,
+ "button": event.button,
+ "buttons": event.buttons,
+ "deltaX": event.deltaX,
+ "deltaY": event.deltaY,
+ "deltaZ": event.deltaZ,
+ "deltaMode": event.deltaMode,
+ "target": event.target.id
+ });
+ appendMessage(event.type + " " +
+ "button: " + event.button + ", " +
+ "pageX: " + event.pageX + ", " +
+ "pageY: " + event.pageY + ", " +
+ "button: " + event.button + ", " +
+ "buttons: " + event.buttons + ", " +
+ "deltaX: " + event.deltaX + ", " +
+ "deltaY: " + event.deltaY + ", " +
+ "deltaZ: " + event.deltaZ + ", " +
+ "deltaMode: " + event.deltaMode + ", " +
+ "target id: " + event.target.id);
+ }
+
+ function resetEvents() {
+ allEvents.events.length = 0;
+ displayMessage("");
+ }
+
+ document.addEventListener("DOMContentLoaded", function() {
+ var outer = document.getElementById("outer");
+ outer.addEventListener("wheel", recordWheelEvent);
+
+ var scrollable = document.getElementById("scrollable");
+ scrollable.addEventListener("wheel", recordWheelEvent);
+ });
+ </script>
+</head>
+<body>
+ <div>
+ <h2>ScrollReporter</h2>
+ <div id="outer" class="area">
+ </div>
+ </div>
+ <div>
+ <h2>OverflowScrollReporter</h2>
+ <div id="scrollable">
+ <div id="scrollContent"></div>
+ </div>
+ </div>
+ <div>
+ <h2>IframeScrollReporter</h2>
+ <iframe id='subframe' srcdoc='
+ <script>
+ document.scrollingElement.addEventListener("wheel",
+ function(event) {
+ window.parent.allEvents.events.push({
+ "type": event.type,
+ "button": event.button,
+ "buttons": event.buttons,
+ "deltaX": event.deltaX,
+ "deltaY": event.deltaY,
+ "deltaZ": event.deltaZ,
+ "deltaMode": event.deltaMode,
+ "target": event.target.id
+ });
+ }
+ );
+ </script>
+ <div id="iframeContent"
+ style="width: 7500px; height: 7500px; background-color:blue" ></div>'>
+ </iframe>
+ </div>
+ <div id="resultContainer">
+ <h2>Events</h2>
+ <div id="events"></div>
+ </div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_wdspec.html b/testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_wdspec.html
new file mode 100644
index 0000000000..0253add960
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/support/test_actions_wdspec.html
@@ -0,0 +1,216 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+<head>
+ <title>Test Actions</title>
+ <style>
+ div { padding:0px; margin: 0px; }
+ #trackPointer { position: fixed; }
+ #resultContainer { width: 600px; height: 60px; }
+ .area { width: 100px; height: 50px; background-color: #ccc; }
+ .block { width: 5px; height: 5px; border: solid 1px red; }
+ .box { display: flex;}
+ #dragArea { position: relative; }
+ #dragTarget { position: absolute; top:22px; left:47px;}
+ </style>
+ <script>
+ "use strict";
+ var els = {};
+ var allEvents = { events: [] };
+ function displayMessage(message) {
+ document.getElementById("events").innerHTML = "<p>" + message + "</p>";
+ }
+
+ function appendMessage(message) {
+ document.getElementById("events").innerHTML += "<p>" + message + "</p>";
+ }
+
+ /**
+ * Escape |key| if it's in a surrogate-half character range.
+ *
+ * Example: given "\ud83d" return "U+d83d".
+ *
+ * Otherwise JSON.stringify will convert it to U+FFFD (REPLACEMENT CHARACTER)
+ * when returning a value from executeScript, for example.
+ */
+ function escapeSurrogateHalf(key) {
+ if (typeof key !== "undefined" && key.length === 1) {
+ var charCode = key.charCodeAt(0);
+ var highSurrogate = charCode >= 0xD800 && charCode <= 0xDBFF;
+ var surrogate = highSurrogate || (charCode >= 0xDC00 && charCode <= 0xDFFF);
+ if (surrogate) {
+ key = "U+" + charCode.toString(16);
+ }
+ }
+ return key;
+ }
+
+ function recordKeyboardEvent(event) {
+ var key = escapeSurrogateHalf(event.key);
+ allEvents.events.push({
+ "code": event.code,
+ "key": key,
+ "which": event.which,
+ "location": event.location,
+ "ctrl": event.ctrlKey,
+ "meta": event.metaKey,
+ "shift": event.shiftKey,
+ "repeat": event.repeat,
+ "type": event.type
+ });
+ appendMessage(event.type + " " +
+ "code: " + event.code + ", " +
+ "key: " + key + ", " +
+ "which: " + event.which + ", " +
+ "keyCode: " + event.keyCode);
+ }
+
+ function recordPointerEvent(event) {
+ if (event.type === "contextmenu") {
+ event.preventDefault();
+ }
+ allEvents.events.push({
+ "type": event.type,
+ "button": event.button,
+ "buttons": event.buttons,
+ "pageX": event.pageX,
+ "pageY": event.pageY,
+ "ctrlKey": event.ctrlKey,
+ "metaKey": event.metaKey,
+ "altKey": event.altKey,
+ "shiftKey": event.shiftKey,
+ "target": event.target.id
+ });
+ appendMessage(event.type + " " +
+ "button: " + event.button + ", " +
+ "pageX: " + event.pageX + ", " +
+ "pageY: " + event.pageY + ", " +
+ "button: " + event.button + ", " +
+ "buttons: " + event.buttons + ", " +
+ "ctrlKey: " + event.ctrlKey + ", " +
+ "altKey: " + event.altKey + ", " +
+ "metaKey: " + event.metaKey + ", " +
+ "shiftKey: " + event.shiftKey + ", " +
+ "target id: " + event.target.id);
+ }
+
+ function recordFirstPointerMove(event) {
+ recordPointerEvent(event);
+ window.removeEventListener("mousemove", recordFirstPointerMove);
+ }
+
+ function grabOnce(event) {
+ grab(event);
+ els.dragTarget.removeEventListener("mousedown", grabOnce);
+ }
+
+ function dropOnce(moveHandler) {
+ return function (event) {
+ moveHandler(event);
+ els.dragArea.removeEventListener("mouseup", dropOnce);
+ }
+ }
+
+ function resetEvents() {
+ allEvents.events.length = 0;
+ displayMessage("");
+ }
+
+ function drop(moveHandler) {
+ return function (event) {
+ els.dragArea.removeEventListener("mousemove", moveHandler);
+ els.dragTarget.style.backgroundColor = "yellow";
+ els.dragTarget.addEventListener("mousedown", grab);
+ recordPointerEvent(event);
+ };
+ }
+
+ function move(el, offsetX, offsetY, timeout) {
+ return function(event) {
+ setTimeout(function() {
+ el.style.top = event.clientY + offsetY + "px";
+ el.style.left = event.clientX + offsetX + "px";
+ }, timeout);
+ };
+ }
+
+ function grab(event) {
+ event.target.style.backgroundColor = "red";
+ let boxRect = event.target.getBoundingClientRect();
+ let areaRect = event.target.parentElement.getBoundingClientRect();
+ let moveHandler = move(
+ event.target,
+ // coordinates of dragTarget must be relative to dragArea such that
+ // dragTarget remains under the pointer
+ -(areaRect.left + (event.clientX - boxRect.left)),
+ -(areaRect.top + (event.clientY - boxRect.top)),
+ 20);
+ els.dragArea.addEventListener("mousemove", moveHandler);
+ els.dragArea.addEventListener("mouseup", dropOnce(drop(moveHandler)));
+ }
+
+ document.addEventListener("DOMContentLoaded", function() {
+ var keyReporter = document.getElementById("keys");
+ keyReporter.addEventListener("keyup", recordKeyboardEvent);
+ keyReporter.addEventListener("keypress", recordKeyboardEvent);
+ keyReporter.addEventListener("keydown", recordKeyboardEvent);
+
+ var outer = document.getElementById("outer");
+ outer.addEventListener("click", recordPointerEvent);
+ outer.addEventListener("dblclick", recordPointerEvent);
+ outer.addEventListener("mousedown", recordPointerEvent);
+ outer.addEventListener("mouseup", recordPointerEvent);
+ outer.addEventListener("contextmenu", recordPointerEvent);
+
+ window.addEventListener("mousemove", recordFirstPointerMove);
+ //visual cue for mousemove
+ var pointer = document.getElementById("trackPointer");
+ window.addEventListener("mousemove", move(pointer, 15, 15, 30));
+ // drag and drop
+ els.dragArea = document.getElementById("dragArea");
+ els.dragArea.addEventListener("dragstart", recordPointerEvent);
+ els.dragTarget = document.getElementById("dragTarget");
+ els.dragTarget.addEventListener("mousedown", grabOnce);
+
+ var draggable = document.getElementById("draggable");
+ draggable.addEventListener("dragstart", recordPointerEvent);
+ draggable.addEventListener("dragenter", recordPointerEvent);
+ draggable.addEventListener("dragend", recordPointerEvent);
+ draggable.addEventListener("dragleave", recordPointerEvent);
+ draggable.addEventListener("dragover", recordPointerEvent);
+
+ var droppable = document.getElementById("droppable");
+ droppable.addEventListener("drop", recordPointerEvent);
+ });
+ </script>
+</head>
+<body>
+ <div id="trackPointer" class="block"></div>
+ <div>
+ <h2>KeyReporter</h2>
+ <input type="text" id="keys" size="80">
+ </div>
+ <div>
+ <h2>ClickReporter</h2>
+ <div id="outer" class="area">
+ </div>
+ </div>
+ <div>
+ <h2>DragReporter</h2>
+ <div id="dragArea" class="area">
+ <div id="dragTarget" class="block"></div>
+ </div>
+ </div>
+ <div>
+ <h2>draggable</h2>
+ <div class=box>
+ <div id=draggable draggable="true" class="area"></div>&nbsp;
+ <div id=droppable dropzone="true" class="area"></div>
+ </div>
+ </div>
+ <div id="resultContainer">
+ <h2>Events</h2>
+ <div id="events"></div>
+ </div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/user_prompts.py b/testing/web-platform/tests/webdriver/tests/perform_actions/user_prompts.py
new file mode 100644
index 0000000000..6bbd22a2a2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/user_prompts.py
@@ -0,0 +1,124 @@
+# META: timeout=long
+
+import pytest
+
+from tests.perform_actions.support.refine import get_keys
+from tests.support.asserts import assert_error, assert_success, assert_dialog_handled
+
+
+actions = [{
+ "type": "key",
+ "id": "foobar",
+ "actions": [
+ {"type": "keyDown", "value": "a"},
+ {"type": "keyUp", "value": "a"},
+ ]
+}]
+
+
+def perform_actions(session, actions):
+ return session.transport.send(
+ "POST",
+ "/session/{session_id}/actions".format(session_id=session.session_id),
+ {"actions": actions})
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, key_chain, key_reporter):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = perform_actions(session, actions)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert get_keys(key_reporter) == "a"
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, key_chain, key_reporter):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = perform_actions(session, actions)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert get_keys(key_reporter) == ""
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, key_reporter):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = perform_actions(session, actions)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert get_keys(key_reporter) == ""
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/validity.py b/testing/web-platform/tests/webdriver/tests/perform_actions/validity.py
new file mode 100644
index 0000000000..9c056b197d
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/validity.py
@@ -0,0 +1,80 @@
+import pytest
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def perform_actions(session, actions):
+ return session.transport.send(
+ "POST",
+ "/session/{session_id}/actions".format(session_id=session.session_id),
+ {"actions": actions})
+
+
+@pytest.mark.parametrize("action_type", ["none", "key", "pointer"])
+def test_pause_duration_invalid_type(session, action_type):
+ for invalid_type in [0.1, None, "foo", True, [], {}]:
+ actions = [{
+ "type": action_type,
+ "id": "foobar",
+ "actions": [{
+ "type": "pause",
+ "duration": invalid_type
+ }]
+ }]
+ response = perform_actions(session, actions)
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("action_type", ["none", "key", "pointer"])
+def test_pause_duration_invalid_value(session, action_type):
+ actions = [{
+ "type": action_type,
+ "id": "foobar",
+ "actions": [{
+ "type": "pause",
+ "duration": -1
+ }]
+ }]
+ response = perform_actions(session, actions)
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("action_type", ["none", "key", "pointer"])
+def test_pause_duration_valid(session, action_type):
+ for valid_duration in [0, 1]:
+ actions = [{
+ "type": action_type,
+ "id": "foobar",
+ "actions": [{
+ "type": "pause",
+ "duration": valid_duration
+ }]
+ }]
+ response = perform_actions(session, actions)
+ assert_success(response)
+
+
+@pytest.mark.parametrize("action_type", ["none", "key", "pointer"])
+def test_pause_without_duration(session, action_type):
+ actions = [{
+ "type": action_type,
+ "id": "foobar",
+ "actions": [{
+ "type": "pause",
+ }]
+ }]
+ response = perform_actions(session, actions)
+ assert_success(response)
+
+
+@pytest.mark.parametrize("action_type", ["none", "key", "pointer"])
+def test_action_without_id(session, action_type):
+ actions = [{
+ "type": action_type,
+ "actions": [{
+ "type": "pause",
+ "duration": 1
+ }]
+ }]
+ response = perform_actions(session, actions)
+ assert_error(response, "invalid argument")
diff --git a/testing/web-platform/tests/webdriver/tests/perform_actions/wheel.py b/testing/web-platform/tests/webdriver/tests/perform_actions/wheel.py
new file mode 100644
index 0000000000..59da29fe89
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/perform_actions/wheel.py
@@ -0,0 +1,75 @@
+import pytest
+
+from webdriver.error import InvalidArgumentException, NoSuchWindowException
+
+from tests.perform_actions.support.refine import get_events
+from tests.support.asserts import assert_move_to_coordinates
+from tests.support.helpers import filter_dict
+
+
+def test_null_response_value(session, wheel_chain):
+ value = wheel_chain.scroll(0, 0, 0, 10).perform()
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window, wheel_chain):
+ with pytest.raises(NoSuchWindowException):
+ wheel_chain.scroll(0, 0, 0, 10).perform()
+
+
+def test_no_browsing_context(session, closed_frame, wheel_chain):
+ with pytest.raises(NoSuchWindowException):
+ wheel_chain.scroll(0, 0, 0, 10).perform()
+
+
+def test_wheel_scroll(session, test_actions_scroll_page, wheel_chain):
+ session.execute_script("document.scrollingElement.scrollTop = 0")
+
+ outer = session.find.css("#outer", all=False)
+ wheel_chain.scroll(0, 0, 5, 10, origin=outer).perform()
+ events = get_events(session)
+ assert len(events) == 1
+ assert events[0]["type"] == "wheel"
+ assert events[0]["deltaX"] >= 5
+ assert events[0]["deltaY"] >= 10
+ assert events[0]["deltaZ"] == 0
+ assert events[0]["target"] == "outer"
+
+
+def test_wheel_scroll_overflow(session, test_actions_scroll_page, wheel_chain):
+ session.execute_script("document.scrollingElement.scrollTop = 0")
+
+ scrollable = session.find.css("#scrollable", all=False)
+ wheel_chain.scroll(0, 0, 5, 10, origin=scrollable).perform()
+ events = get_events(session)
+ assert len(events) == 1
+ assert events[0]["type"] == "wheel"
+ assert events[0]["deltaX"] >= 5
+ assert events[0]["deltaY"] >= 10
+ assert events[0]["deltaZ"] == 0
+ assert events[0]["target"] == "scrollContent"
+
+
+def test_wheel_scroll_iframe(session, test_actions_scroll_page, wheel_chain):
+ session.execute_script("document.scrollingElement.scrollTop = 0")
+
+ subframe = session.find.css("#subframe", all=False)
+ wheel_chain.scroll(0, 0, 5, 10, origin=subframe).perform()
+ events = get_events(session)
+ assert len(events) == 1
+ assert events[0]["type"] == "wheel"
+ assert events[0]["deltaX"] >= 5
+ assert events[0]["deltaY"] >= 10
+ assert events[0]["deltaZ"] == 0
+ assert events[0]["target"] == "iframeContent"
+
+
+@pytest.mark.parametrize("missing", ["x", "y", "deltaX", "deltaY"])
+def test_wheel_missing_prop(session, test_actions_scroll_page, wheel_chain, missing):
+ session.execute_script("document.scrollingElement.scrollTop = 0")
+
+ outer = session.find.css("#outer", all=False)
+ actions = wheel_chain.scroll(0, 0, 5, 10, origin=outer)
+ del actions._actions[-1][missing]
+ with pytest.raises(InvalidArgumentException):
+ actions.perform()
diff --git a/testing/web-platform/tests/webdriver/tests/permissions/__init__.py b/testing/web-platform/tests/webdriver/tests/permissions/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/permissions/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/permissions/set.py b/testing/web-platform/tests/webdriver/tests/permissions/set.py
new file mode 100644
index 0000000000..9b71c4486c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/permissions/set.py
@@ -0,0 +1,83 @@
+from tests.support.asserts import assert_error, assert_success
+import pytest
+
+def query(session, name):
+ script = """
+ var done = arguments[0];
+ navigator.permissions.query({ name: '%s' })
+ .then(function(value) {
+ done({ status: 'success', value: value && value.state });
+ }, function(error) {
+ done({ status: 'error', value: error && error.message });
+ });
+ """ % name
+
+ return session.transport.send(
+ "POST", "/session/{session_id}/execute/async".format(**vars(session)),
+ {"script": script, "args": []})
+
+# > 1. Let parameters be the parameters argument, converted to an IDL value of
+# > type PermissionSetParameters. If this throws an exception, return a
+# > WebDriver error with WebDriver error code invalid argument.
+@pytest.mark.parametrize("parameters", [
+ #{ "descriptor": { "name": "geolocation" }, "state": "granted" }
+ { "descriptor": { "name": 23 }, "state": "granted" },
+ { "descriptor": { }, "state": "granted" },
+ { "descriptor": { "name": "geolocation" }, "state": "Granted" },
+ { "descriptor": 23, "state": "granted" },
+ { "descriptor": "geolocation", "state": "granted" },
+ { "descriptor": [ { "name": "geolocation" } ], "state": "granted" },
+ [ { "descriptor": { "name": "geolocation" }, "state": "granted" } ],
+])
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+def test_invalid_parameters(session, url, parameters):
+ session.url = url("/common/blank.html", protocol="https")
+ response = session.transport.send(
+ "POST",
+ "/session/{session_id}/permissions".format(**vars(session)),
+ parameters
+ )
+ assert_error(response, "invalid argument")
+
+# > 6. If settings is a non-secure context and rootDesc.name isn't allowed in
+# > non-secure contexts, return a WebDriver error with WebDriver error code
+# > invalid argument.
+@pytest.mark.parametrize("state", ["granted", "denied", "prompt"])
+def test_non_secure_context(session, url, state):
+ session.url = url("/common/blank.html", protocol="http")
+ response = session.transport.send(
+ "POST", "/session/{session_id}/permissions".format(**vars(session)),
+ { "descriptor": { "name": "push" }, "state": state }
+ )
+
+ assert_error(response, "invalid argument")
+
+@pytest.mark.parametrize("state", ["granted", "denied", "prompt"])
+@pytest.mark.capabilities({"acceptInsecureCerts": True})
+def test_set_to_state(session, url, state):
+ session.url = url("/common/blank.html", protocol="https")
+ parameters = { "descriptor": { "name": "geolocation" }, "state": state }
+ response = session.transport.send(
+ "POST", "/session/{session_id}/permissions".format(**vars(session)),
+ parameters
+ )
+
+ try:
+ assert_success(response)
+ except AssertionError:
+ # > 4. If parameters.state is an inappropriate permission state for any
+ # > implementation-defined reason, return a WebDriver error with
+ # > WebDriver error code invalid argument.
+ assert_error(response, "invalid argument")
+ return
+
+ assert response.body.get("value") == None
+
+ response = query(session, "geolocation")
+
+ assert_success(response)
+ result = response.body.get("value")
+
+ assert isinstance(result, dict)
+ assert result.get("status") == "success"
+ assert result.get("value") == state
diff --git a/testing/web-platform/tests/webdriver/tests/print/__init__.py b/testing/web-platform/tests/webdriver/tests/print/__init__.py
new file mode 100644
index 0000000000..788c0e1a31
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/print/__init__.py
@@ -0,0 +1,21 @@
+def load_pdf_document(session, inline, pdf_data):
+ """Load a PDF document in the browser using pdf.js"""
+ session.url = inline("""
+<!doctype html>
+<script src="/_pdf_js/pdf.js"></script>
+<canvas></canvas>
+<script>
+async function getText() {
+ pages = [];
+ let loadingTask = pdfjsLib.getDocument({data: atob("%s")});
+ let pdf = await loadingTask.promise;
+ for (let pageNumber=1; pageNumber<=pdf.numPages; pageNumber++) {
+ let page = await pdf.getPage(pageNumber);
+ textContent = await page.getTextContent()
+ text = textContent.items.map(x => x.str).join("");
+ pages.push(text);
+ }
+ return pages
+}
+</script>
+""" % pdf_data)
diff --git a/testing/web-platform/tests/webdriver/tests/print/printcmd.py b/testing/web-platform/tests/webdriver/tests/print/printcmd.py
new file mode 100644
index 0000000000..169e2249a9
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/print/printcmd.py
@@ -0,0 +1,142 @@
+# META: timeout=long
+from base64 import decodebytes
+
+import pytest
+
+from . import load_pdf_document
+from tests.support.asserts import assert_error, assert_success
+
+
+def do_print(session, options):
+ return session.transport.send(
+ "POST", "session/{session_id}/print".format(**vars(session)),
+ options)
+
+
+def assert_pdf(data):
+ assert data.startswith(b"%PDF-"), "Decoded data starts with the PDF signature"
+ assert data.endswith(b"%%EOF\n"), "Decoded data ends with the EOF flag"
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = do_print(session, {})
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = do_print(session, {})
+ value = assert_success(response)
+ pdf = decodebytes(value.encode())
+ assert_pdf(pdf)
+
+
+def test_html_document(session, inline):
+ session.url = inline("Test")
+
+ response = do_print(session, {
+ "page": {"width": 10,
+ "height": 20},
+ "shrinkToFit": False
+ })
+ value = assert_success(response)
+ pdf = decodebytes(value.encode())
+ # TODO: Test that the output is reasonable
+ assert_pdf(pdf)
+
+
+def test_large_html_document(session, inline):
+ session.url = inline("<canvas id=\"image\"></canvas>")
+
+ session.execute_script(
+ """
+ const width = 700;
+ const height = 900;
+
+ const canvas = document.getElementById("image");
+ const context = canvas.getContext("2d");
+
+ canvas.width = width;
+ canvas.height = height;
+
+ for (let x = 0; x < width; ++x) {
+ for (let y = 0; y < height; ++y) {
+ const colourHex = Math.floor(Math.random() * 0xffffff).toString(16);
+
+ context.fillStyle = `#${colourHex}`;
+ context.fillRect(x, y, 1, 1);
+ }
+ }
+ """
+ )
+
+ response = do_print(session, {})
+ value = assert_success(response)
+ pdf = decodebytes(value.encode())
+
+ # This was added to test the fix for a bug in firefox where a PDF larger
+ # than 500kb would cause an error. If the resulting PDF is smaller than that
+ # it could pass incorrectly.
+ assert len(pdf) > 500000
+ assert_pdf(pdf)
+
+
+@pytest.mark.parametrize("ranges,expected", [
+ (["2-4"], ["Page 2", "Page 3", "Page 4"]),
+ (["2-4", "2-3"], ["Page 2", "Page 3", "Page 4"]),
+ (["2-4", "3-5"], ["Page 2", "Page 3", "Page 4", "Page 5"]),
+ (["9-"], ["Page 9", "Page 10"]),
+ (["-2"], ["Page 1", "Page 2"]),
+ (["7"], ["Page 7"]),
+ (["-2", "9-", "7"], ["Page 1", "Page 2", "Page 7", "Page 9", "Page 10"]),
+ (["-5", "2-"], ["Page 1", "Page 2", "Page 3", "Page 4", "Page 5", "Page 6", "Page 7", "Page 8", "Page 9", "Page 10"]),
+ ([], ["Page 1", "Page 2", "Page 3", "Page 4", "Page 5", "Page 6", "Page 7", "Page 8", "Page 9", "Page 10"]),
+])
+def test_page_ranges_document(session, inline, ranges, expected):
+ session.url = inline("""
+<style>
+div {page-break-after: always}
+</style>
+
+<div>Page 1</div>
+<div>Page 2</div>
+<div>Page 3</div>
+<div>Page 4</div>
+<div>Page 5</div>
+<div>Page 6</div>
+<div>Page 7</div>
+<div>Page 8</div>
+<div>Page 9</div>
+<div>Page 10</div>""")
+
+ response = do_print(session, {
+ "pageRanges": ranges
+ })
+ value = assert_success(response)
+ pdf = decodebytes(value.encode())
+ # TODO: Test that the output is reasonable
+ assert_pdf(pdf)
+
+ load_pdf_document(session, inline, value)
+ pages = session.execute_async_script("""let callback = arguments[arguments.length - 1];
+window.getText().then(pages => callback(pages));""")
+ assert pages == expected
+
+
+@pytest.mark.parametrize("options", [{"orientation": 0},
+ {"orientation": "foo"},
+ {"scale": "1"},
+ {"scale": 3},
+ {"scale": 0.01},
+ {"margin": {"top": "1"}},
+ {"margin": {"bottom": -1}},
+ {"page": {"height": False}},
+ {"shrinkToFit": "false"},
+ {"pageRanges": ["3-2"]},
+ {"pageRanges": ["a-2"]},
+ {"pageRanges": ["1:2"]},
+ {"pageRanges": ["1-2-3"]},
+ {"pageRanges": [None]},
+ {"pageRanges": ["1-2", {}]}])
+def test_page_ranges_invalid(session, options):
+ response = do_print(session, options)
+ assert_error(response, "invalid argument")
diff --git a/testing/web-platform/tests/webdriver/tests/print/user_prompts.py b/testing/web-platform/tests/webdriver/tests/print/user_prompts.py
new file mode 100644
index 0000000000..0a29b518ac
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/print/user_prompts.py
@@ -0,0 +1,111 @@
+# META: timeout=long
+from base64 import decodebytes
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+from .printcmd import do_print, assert_pdf
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<input/>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = do_print(session, {})
+ value = assert_success(response)
+
+ pdf = decodebytes(value.encode())
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert_pdf(pdf)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<input/>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = do_print(session, {})
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<input/>")
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = do_print(session, {})
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/refresh/__init__.py b/testing/web-platform/tests/webdriver/tests/refresh/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/refresh/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/refresh/refresh.py b/testing/web-platform/tests/webdriver/tests/refresh/refresh.py
new file mode 100644
index 0000000000..b3647130c7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/refresh/refresh.py
@@ -0,0 +1,123 @@
+import pytest
+
+from webdriver import error
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def refresh(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/refresh".format(**vars(session)))
+
+
+def test_null_response_value(session, inline):
+ session.url = inline("<div>")
+
+ response = refresh(session)
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = refresh(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame, inline):
+ url = inline("<div id=foo>")
+
+ session.url = url
+ element = session.find.css("#foo", all=False)
+
+ response = refresh(session)
+ assert_success(response)
+
+ with pytest.raises(error.StaleElementReferenceException):
+ element.property("id")
+
+ assert session.url == url
+ assert session.find.css("#foo", all=False)
+
+
+def test_basic(session, inline):
+ url = inline("<div id=foo>")
+
+ session.url = url
+ element = session.find.css("#foo", all=False)
+
+ response = refresh(session)
+ assert_success(response)
+
+ with pytest.raises(error.StaleElementReferenceException):
+ element.property("id")
+
+ assert session.url == url
+ assert session.find.css("#foo", all=False)
+
+
+def test_dismissed_beforeunload(session, inline):
+ url_beforeunload = inline("""
+ <input type="text">
+ <script>
+ window.addEventListener("beforeunload", function (event) {
+ event.preventDefault();
+ });
+ </script>
+ """)
+
+ session.url = url_beforeunload
+ element = session.find.css("input", all=False)
+ element.send_keys("bar")
+
+ response = refresh(session)
+ assert_success(response)
+
+ with pytest.raises(error.StaleElementReferenceException):
+ element.property("id")
+
+ session.find.css("input", all=False)
+
+
+def test_history_pushstate(session, inline):
+ pushstate_page = inline("""
+ <script>
+ function pushState() {
+ history.pushState({foo: "bar"}, "", "#pushstate");
+ }
+ </script>
+ <a onclick="javascript:pushState();">click</a>
+ """)
+
+ session.url = pushstate_page
+
+ session.find.css("a", all=False).click()
+ assert session.url == "{}#pushstate".format(pushstate_page)
+ assert session.execute_script("return history.state;") == {"foo": "bar"}
+
+ session.execute_script("""
+ let elem = window.document.createElement('div');
+ window.document.body.appendChild(elem);
+ """)
+ element = session.find.css("div", all=False)
+
+ response = refresh(session)
+ assert_success(response)
+
+ assert session.url == "{}#pushstate".format(pushstate_page)
+ assert session.execute_script("return history.state;") == {"foo": "bar"}
+
+ with pytest.raises(error.StaleElementReferenceException):
+ element.property("id")
+
+
+def test_refresh_switches_to_parent_browsing_context(session, create_frame, inline):
+ session.url = inline("<div id=foo>")
+
+ session.switch_frame(create_frame())
+ with pytest.raises(error.NoSuchElementException):
+ session.find.css("#foo", all=False)
+
+ response = refresh(session)
+ assert_success(response)
+
+ session.find.css("#foo", all=False)
diff --git a/testing/web-platform/tests/webdriver/tests/refresh/user_prompts.py b/testing/web-platform/tests/webdriver/tests/refresh/user_prompts.py
new file mode 100644
index 0000000000..7e944b53b0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/refresh/user_prompts.py
@@ -0,0 +1,117 @@
+# META: timeout=long
+
+import pytest
+
+from webdriver.error import StaleElementReferenceException
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
+def refresh(session):
+ return session.transport.send(
+ "POST", "session/{session_id}/refresh".format(**vars(session)))
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<div id=foo>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = refresh(session)
+ assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ with pytest.raises(StaleElementReferenceException):
+ element.property("id")
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<div id=foo>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = refresh(session)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert element.property("id") == "foo"
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<div id=foo>")
+ element = session.find.css("#foo", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = refresh(session)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert element.property("id") == "foo"
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type):
+ # retval not testable for confirm and prompt because window has been reloaded
+ check_user_prompt_closed_without_exception(dialog_type, None)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type):
+ # retval not testable for confirm and prompt because window has been reloaded
+ check_user_prompt_closed_without_exception(dialog_type, None)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/release_actions/__init__.py b/testing/web-platform/tests/webdriver/tests/release_actions/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/release_actions/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/release_actions/conftest.py b/testing/web-platform/tests/webdriver/tests/release_actions/conftest.py
new file mode 100644
index 0000000000..df86db6306
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/release_actions/conftest.py
@@ -0,0 +1,40 @@
+import pytest
+
+
+@pytest.fixture
+def key_chain(session):
+ return session.actions.sequence("key", "keyboard_id")
+
+
+@pytest.fixture
+def mouse_chain(session):
+ return session.actions.sequence(
+ "pointer",
+ "pointer_id",
+ {"pointerType": "mouse"})
+
+
+@pytest.fixture
+def none_chain(session):
+ return session.actions.sequence("none", "none_id")
+
+
+@pytest.fixture(autouse=True)
+def release_actions(session, request):
+ # release all actions after each test
+ # equivalent to a teardown_function, but with access to session fixture
+ request.addfinalizer(session.actions.release)
+
+
+@pytest.fixture
+def key_reporter(session, test_actions_page, request):
+    """Represents focused input element from `test_actions_page` fixture."""
+ input_el = session.find.css("#keys", all=False)
+ input_el.click()
+ session.execute_script("resetEvents();")
+ return input_el
+
+
+@pytest.fixture
+def test_actions_page(session, url):
+ session.url = url("/webdriver/tests/release_actions/support/test_actions_wdspec.html")
diff --git a/testing/web-platform/tests/webdriver/tests/release_actions/release.py b/testing/web-platform/tests/webdriver/tests/release_actions/release.py
new file mode 100644
index 0000000000..5df1ff4be9
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/release_actions/release.py
@@ -0,0 +1,23 @@
+from tests.support.asserts import assert_error, assert_success
+
+
+def release_actions(session):
+ return session.transport.send(
+ "DELETE",
+ "/session/{session_id}/actions".format(**vars(session)),
+ )
+
+
+def test_null_response_value(session):
+ response = release_actions(session)
+ assert_success(response, None)
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = release_actions(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = release_actions(session)
+ assert_error(response, "no such window")
diff --git a/testing/web-platform/tests/webdriver/tests/release_actions/sequence.py b/testing/web-platform/tests/webdriver/tests/release_actions/sequence.py
new file mode 100644
index 0000000000..75143d85ca
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/release_actions/sequence.py
@@ -0,0 +1,83 @@
+# META: timeout=long
+
+from tests.release_actions.support.refine import get_events, get_keys
+from tests.support.helpers import filter_dict
+
+
+def test_release_no_actions_sends_no_events(session, key_reporter):
+ session.actions.release()
+ assert len(get_keys(key_reporter)) == 0
+ assert len(get_events(session)) == 0
+
+
+def test_release_char_sequence_sends_keyup_events_in_reverse(session,
+ key_reporter,
+ key_chain):
+ key_chain \
+ .key_down("a") \
+ .key_down("b") \
+ .perform()
+ # reset so we only see the release events
+ session.execute_script("resetEvents();")
+ session.actions.release()
+ expected = [
+ {"code": "KeyB", "key": "b", "type": "keyup"},
+ {"code": "KeyA", "key": "a", "type": "keyup"},
+ ]
+ all_events = get_events(session)
+ events = [filter_dict(e, expected[0]) for e in all_events]
+ if len(events) > 0 and events[0]["code"] is None:
+ # Remove 'code' entry if browser doesn't support it
+ expected = [filter_dict(e, {"key": "", "type": ""}) for e in expected]
+ events = [filter_dict(e, expected[0]) for e in events]
+ assert events == expected
+
+
+def test_release_mouse_sequence_resets_dblclick_state(session,
+ test_actions_page,
+ mouse_chain):
+ reporter = session.find.css("#outer", all=False)
+
+ mouse_chain \
+ .click(element=reporter) \
+ .perform()
+ session.actions.release()
+ mouse_chain \
+ .perform()
+ events = get_events(session)
+
+ expected = [
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ ]
+ filtered_events = [filter_dict(e, expected[0]) for e in events]
+ assert expected == filtered_events[1:]
+
+
+def test_no_release_mouse_sequence_keeps_dblclick_state(session,
+ test_actions_page,
+ mouse_chain):
+ reporter = session.find.css("#outer", all=False)
+
+ mouse_chain \
+ .click(element=reporter) \
+ .perform()
+ mouse_chain \
+ .perform()
+ events = get_events(session)
+
+ expected = [
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ {"type": "mousedown", "button": 0},
+ {"type": "mouseup", "button": 0},
+ {"type": "click", "button": 0},
+ {"type": "dblclick", "button": 0},
+ ]
+ filtered_events = [filter_dict(e, expected[0]) for e in events]
+ assert expected == filtered_events[1:]
diff --git a/testing/web-platform/tests/webdriver/tests/release_actions/support/__init__.py b/testing/web-platform/tests/webdriver/tests/release_actions/support/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/release_actions/support/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/release_actions/support/refine.py b/testing/web-platform/tests/webdriver/tests/release_actions/support/refine.py
new file mode 100644
index 0000000000..90f722587c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/release_actions/support/refine.py
@@ -0,0 +1,24 @@
+def get_events(session):
+    """Return list of events recorded on the test_actions_page fixture page."""
+ events = session.execute_script("return allEvents.events;") or []
+ # `key` values in `allEvents` may be escaped (see `escapeSurrogateHalf` in
+ # test_keys_wdspec.html), so this converts them back into unicode literals.
+ for e in events:
+ # example: turn "U+d83d" (6 chars) into u"\ud83d" (1 char)
+ if "key" in e and e["key"].startswith(u"U+"):
+ key = e["key"]
+ hex_suffix = key[key.index("+") + 1:]
+            e["key"] = chr(int(hex_suffix, 16))
+ return events
+
+
+def get_keys(input_el):
+ """Get printable characters entered into `input_el`.
+
+ :param input_el: HTML input element.
+ """
+ rv = input_el.property("value")
+ if rv is None:
+ return ""
+ else:
+ return rv
diff --git a/testing/web-platform/tests/webdriver/tests/release_actions/support/test_actions_wdspec.html b/testing/web-platform/tests/webdriver/tests/release_actions/support/test_actions_wdspec.html
new file mode 100644
index 0000000000..6f844cd255
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/release_actions/support/test_actions_wdspec.html
@@ -0,0 +1,197 @@
+<!doctype html>
+<meta charset=utf-8>
+<html>
+<head>
+ <title>Test Actions</title>
+ <style>
+ div { padding:0px; margin: 0px; }
+ #trackPointer { position: fixed; }
+ #resultContainer { width: 600px; height: 60px; }
+ .area { width: 100px; height: 50px; background-color: #ccc; }
+ .block { width: 5px; height: 5px; border: solid 1px red; }
+ #dragArea { position: relative; }
+ #dragTarget { position: absolute; top:22px; left:47px;}
+ </style>
+ <script>
+ "use strict";
+ var els = {};
+ var allEvents = { events: [] };
+ function displayMessage(message) {
+ document.getElementById("events").innerHTML = "<p>" + message + "</p>";
+ }
+
+ function appendMessage(message) {
+ document.getElementById("events").innerHTML += "<p>" + message + "</p>";
+ }
+
+ /**
+ * Escape |key| if it's in a surrogate-half character range.
+ *
+ * Example: given "\ud83d" return "U+d83d".
+ *
+ * Otherwise JSON.stringify will convert it to U+FFFD (REPLACEMENT CHARACTER)
+ * when returning a value from executeScript, for example.
+ */
+ function escapeSurrogateHalf(key) {
+ if (typeof key !== "undefined" && key.length === 1) {
+ var charCode = key.charCodeAt(0);
+ var highSurrogate = charCode >= 0xD800 && charCode <= 0xDBFF;
+ var surrogate = highSurrogate || (charCode >= 0xDC00 && charCode <= 0xDFFF);
+ if (surrogate) {
+ key = "U+" + charCode.toString(16);
+ }
+ }
+ return key;
+ }
+
+ function recordKeyboardEvent(event) {
+ var key = escapeSurrogateHalf(event.key);
+ allEvents.events.push({
+ "code": event.code,
+ "key": key,
+ "which": event.which,
+ "location": event.location,
+ "ctrl": event.ctrlKey,
+ "meta": event.metaKey,
+ "shift": event.shiftKey,
+ "repeat": event.repeat,
+ "type": event.type
+ });
+ appendMessage(event.type + " " +
+ "code: " + event.code + ", " +
+ "key: " + key + ", " +
+ "which: " + event.which + ", " +
+ "keyCode: " + event.keyCode);
+ }
+
+ function recordPointerEvent(event) {
+ if (event.type === "contextmenu") {
+ event.preventDefault();
+ }
+ allEvents.events.push({
+ "type": event.type,
+ "button": event.button,
+ "buttons": event.buttons,
+ "pageX": event.pageX,
+ "pageY": event.pageY,
+ "ctrlKey": event.ctrlKey,
+ "metaKey": event.metaKey,
+ "altKey": event.altKey,
+ "shiftKey": event.shiftKey,
+ "target": event.target.id
+ });
+ appendMessage(event.type + " " +
+ "button: " + event.button + ", " +
+ "pageX: " + event.pageX + ", " +
+ "pageY: " + event.pageY + ", " +
+ "button: " + event.button + ", " +
+ "buttons: " + event.buttons + ", " +
+ "ctrlKey: " + event.ctrlKey + ", " +
+ "altKey: " + event.altKey + ", " +
+ "metaKey: " + event.metaKey + ", " +
+ "shiftKey: " + event.shiftKey + ", " +
+ "target id: " + event.target.id);
+ }
+
+ function recordFirstPointerMove(event) {
+ recordPointerEvent(event);
+ window.removeEventListener("mousemove", recordFirstPointerMove);
+ }
+
+ function grabOnce(event) {
+ grab(event);
+ els.dragTarget.removeEventListener("mousedown", grabOnce);
+ }
+
+ function dropOnce(moveHandler) {
+ return function (event) {
+ moveHandler(event);
+ els.dragArea.removeEventListener("mouseup", dropOnce);
+ }
+ }
+
+ function resetEvents() {
+ allEvents.events.length = 0;
+ displayMessage("");
+ }
+
+ function drop(moveHandler) {
+ return function (event) {
+ els.dragArea.removeEventListener("mousemove", moveHandler);
+ els.dragTarget.style.backgroundColor = "yellow";
+ els.dragTarget.addEventListener("mousedown", grab);
+ recordPointerEvent(event);
+ };
+ }
+
+ function move(el, offsetX, offsetY, timeout) {
+ return function(event) {
+ setTimeout(function() {
+ el.style.top = event.clientY + offsetY + "px";
+ el.style.left = event.clientX + offsetX + "px";
+ }, timeout);
+ };
+ }
+
+ function grab(event) {
+ event.target.style.backgroundColor = "red";
+ let boxRect = event.target.getBoundingClientRect();
+ let areaRect = event.target.parentElement.getBoundingClientRect();
+ let moveHandler = move(
+ event.target,
+ // coordinates of dragTarget must be relative to dragArea such that
+ // dragTarget remains under the pointer
+ -(areaRect.left + (event.clientX - boxRect.left)),
+ -(areaRect.top + (event.clientY - boxRect.top)),
+ 20);
+ els.dragArea.addEventListener("mousemove", moveHandler);
+ els.dragArea.addEventListener("mouseup", dropOnce(drop(moveHandler)));
+ }
+
+ document.addEventListener("DOMContentLoaded", function() {
+ var keyReporter = document.getElementById("keys");
+ keyReporter.addEventListener("keyup", recordKeyboardEvent);
+ keyReporter.addEventListener("keypress", recordKeyboardEvent);
+ keyReporter.addEventListener("keydown", recordKeyboardEvent);
+
+ var outer = document.getElementById("outer");
+ outer.addEventListener("click", recordPointerEvent);
+ outer.addEventListener("dblclick", recordPointerEvent);
+ outer.addEventListener("mousedown", recordPointerEvent);
+ outer.addEventListener("mouseup", recordPointerEvent);
+ outer.addEventListener("contextmenu", recordPointerEvent);
+
+ window.addEventListener("mousemove", recordFirstPointerMove);
+ //visual cue for mousemove
+ var pointer = document.getElementById("trackPointer");
+ window.addEventListener("mousemove", move(pointer, 15, 15, 30));
+ // drag and drop
+ els.dragArea = document.getElementById("dragArea");
+ els.dragTarget = document.getElementById("dragTarget");
+ els.dragTarget.addEventListener("mousedown", grabOnce);
+ });
+ </script>
+</head>
+<body>
+ <div id="trackPointer" class="block"></div>
+ <div>
+ <h2>KeyReporter</h2>
+ <input type="text" id="keys" size="80">
+ </div>
+ <div>
+ <h2>ClickReporter</h2>
+ <div id="outer" class="area">
+ </div>
+ </div>
+ <div>
+ <h2>DragReporter</h2>
+ <div id="dragArea" class="area">
+ <div id="dragTarget" class="block"></div>
+ </div>
+ </div>
+ <div id="resultContainer">
+ <h2>Events</h2>
+ <div id="events"></div>
+ </div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/send_alert_text/__init__.py b/testing/web-platform/tests/webdriver/tests/send_alert_text/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/send_alert_text/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/send_alert_text/conftest.py b/testing/web-platform/tests/webdriver/tests/send_alert_text/conftest.py
new file mode 100644
index 0000000000..b080761bde
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/send_alert_text/conftest.py
@@ -0,0 +1,24 @@
+import pytest
+
+from webdriver.error import NoSuchAlertException, NoSuchWindowException
+
+
+@pytest.fixture(name="session")
+def fixture_session(capabilities, session):
+ """Prevent dialog rate limits by running the test in a new window."""
+ original_handle = session.window_handle
+ session.window_handle = session.new_window()
+
+ yield session
+
+ try:
+ session.alert.dismiss()
+ except NoSuchAlertException:
+ pass
+
+ try:
+ session.window.close()
+ except NoSuchWindowException:
+ pass
+
+ session.window_handle = original_handle
diff --git a/testing/web-platform/tests/webdriver/tests/send_alert_text/send.py b/testing/web-platform/tests/webdriver/tests/send_alert_text/send.py
new file mode 100644
index 0000000000..df218c803b
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/send_alert_text/send.py
@@ -0,0 +1,94 @@
+import pytest
+
+from webdriver.error import NoSuchAlertException
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.sync import Poll
+
+
+@pytest.fixture
+def page(session, inline):
+ session.url = inline("""
+ <script>window.result = window.prompt('Enter Your Name: ', 'Name');</script>
+ """)
+
+
+def send_alert_text(session, text=None):
+ return session.transport.send(
+ "POST", "session/{session_id}/alert/text".format(**vars(session)),
+ {"text": text})
+
+
+def test_null_parameter_value(session, http):
+ path = "/session/{session_id}/alert/text".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_null_response_value(session, page):
+ response = send_alert_text(session, "Federer")
+ value = assert_success(response)
+ assert value is None
+
+
+@pytest.mark.parametrize("text", [None, {}, [], 42, True])
+def test_invalid_input(session, page, text):
+ response = send_alert_text(session, text)
+ assert_error(response, "invalid argument")
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = send_alert_text(session, "Federer")
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = send_alert_text(session, "Federer")
+ assert_error(response, "no such alert")
+
+
+def test_no_user_prompt(session):
+ response = send_alert_text(session, "Federer")
+ assert_error(response, "no such alert")
+
+
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm"])
+def test_alert_element_not_interactable(session, inline, dialog_type):
+ session.url = inline("<script>window.{}('Hello');</script>".format(dialog_type))
+
+ response = send_alert_text(session, "Federer")
+ assert_error(response, "element not interactable")
+
+
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm"])
+def test_chained_alert_element_not_interactable(session, inline, dialog_type):
+ session.url = inline("<script>window.{}('Hello');</script>".format(dialog_type))
+ session.alert.accept()
+
+ session.url = inline("<script>window.{}('Hello');</script>".format(dialog_type))
+ response = send_alert_text(session, "Federer")
+ assert_error(response, "element not interactable")
+
+
+@pytest.mark.parametrize("text", ["", "Federer", " Fed erer ", "Fed\terer"])
+def test_send_alert_text(session, page, text):
+ send_response = send_alert_text(session, text)
+ assert_success(send_response)
+
+ session.alert.accept()
+
+ assert session.execute_script("return window.result") == text
+
+
+def test_unexpected_alert(session):
+ session.execute_script("setTimeout(function() { prompt('Hello'); }, 100);")
+ wait = Poll(
+ session,
+ timeout=5,
+ ignored_exceptions=NoSuchAlertException,
+ message="No user prompt with text 'Hello' detected")
+ wait.until(lambda s: s.alert.text == "Hello")
+
+ response = send_alert_text(session, "Federer")
+ assert_success(response)
diff --git a/testing/web-platform/tests/webdriver/tests/set_timeouts/__init__.py b/testing/web-platform/tests/webdriver/tests/set_timeouts/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/set_timeouts/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/set_timeouts/set.py b/testing/web-platform/tests/webdriver/tests/set_timeouts/set.py
new file mode 100644
index 0000000000..6620f4df2a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/set_timeouts/set.py
@@ -0,0 +1,95 @@
+import pytest
+
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def set_timeouts(session, timeouts):
+ return session.transport.send(
+ "POST", "session/{session_id}/timeouts".format(**vars(session)),
+ timeouts)
+
+
+def test_null_parameter_value(session, http):
+ path = "/session/{session_id}/timeouts".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_null_response_value(session):
+ timeouts = {"implicit": 10, "pageLoad": 10, "script": 10}
+ response = set_timeouts(session, timeouts)
+ value = assert_success(response)
+ assert value is None
+
+
+@pytest.mark.parametrize("value", [1, "{}", False, []])
+def test_parameters_invalid(session, value):
+ response = set_timeouts(session, value)
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("value", [{}, {"a": 42}])
+def test_parameters_unknown_fields(session, value):
+ original = session.timeouts._get()
+
+ response = set_timeouts(session, value)
+ assert_success(response)
+
+ assert session.timeouts._get() == original
+
+
+def test_script_parameter_empty_no_change(session):
+ original = session.timeouts._get()
+
+ response = set_timeouts(session, {"implicit": 100})
+ assert_success(response)
+
+ assert session.timeouts._get()["script"] == original["script"]
+
+
+@pytest.mark.parametrize("typ", ["implicit", "pageLoad", "script"])
+@pytest.mark.parametrize("value", [0, 2.0, 2**53 - 1])
+def test_positive_integer(session, typ, value):
+ response = set_timeouts(session, {typ: value})
+ assert_success(response)
+
+ assert session.timeouts._get(typ) == value
+
+
+@pytest.mark.parametrize("typ", ["implicit", "pageLoad"])
+@pytest.mark.parametrize("value", [None, [], {}, False, "10"])
+def test_value_invalid_types(session, typ, value):
+ response = set_timeouts(session, {typ: value})
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("value", [[], {}, False, "10"])
+def test_value_invalid_types_for_script(session, value):
+ response = set_timeouts(session, {"script": value})
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("typ", ["implicit", "pageLoad", "script"])
+@pytest.mark.parametrize("value", [-1, 2.5, 2**53])
+def test_value_positive_integer(session, typ, value):
+ response = set_timeouts(session, {typ: value})
+ assert_error(response, "invalid argument")
+
+
+def test_set_all_fields(session):
+ timeouts = {"implicit": 10, "pageLoad": 20, "script": 30}
+ response = set_timeouts(session, timeouts)
+ assert_success(response)
+
+ assert session.timeouts.implicit == 10
+ assert session.timeouts.page_load == 20
+ assert session.timeouts.script == 30
+
+
+def test_script_value_null(session):
+ response = set_timeouts(session, {"script": None})
+ assert_success(response)
+
+ assert session.timeouts.script is None
diff --git a/testing/web-platform/tests/webdriver/tests/set_timeouts/user_prompts.py b/testing/web-platform/tests/webdriver/tests/set_timeouts/user_prompts.py
new file mode 100644
index 0000000000..a98d87e9b2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/set_timeouts/user_prompts.py
@@ -0,0 +1,62 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_success
+
+
+def set_timeouts(session, timeouts):
+ return session.transport.send(
+ "POST", "session/{session_id}/timeouts".format(**vars(session)),
+ timeouts)
+
+
+@pytest.fixture
+def check_user_prompt_not_closed(session, create_dialog):
+ def check_user_prompt_not_closed(dialog_type):
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = set_timeouts(session, {"script": 100})
+ assert_success(response)
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ assert session.timeouts.script == 100
+
+ return check_user_prompt_not_closed
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept(check_user_prompt_not_closed, dialog_type):
+ check_user_prompt_not_closed(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_accept_and_notify(check_user_prompt_not_closed, dialog_type):
+ check_user_prompt_not_closed(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss(check_user_prompt_not_closed, dialog_type):
+ check_user_prompt_not_closed(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_dismiss_and_notify(check_user_prompt_not_closed, dialog_type):
+ check_user_prompt_not_closed(dialog_type)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed, dialog_type):
+ check_user_prompt_not_closed(dialog_type)
+
+
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_default(check_user_prompt_not_closed, dialog_type):
+ check_user_prompt_not_closed(dialog_type)
diff --git a/testing/web-platform/tests/webdriver/tests/set_window_rect/__init__.py b/testing/web-platform/tests/webdriver/tests/set_window_rect/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/set_window_rect/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/set_window_rect/set.py b/testing/web-platform/tests/webdriver/tests/set_window_rect/set.py
new file mode 100644
index 0000000000..23382288a3
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/set_window_rect/set.py
@@ -0,0 +1,403 @@
+# META: timeout=long
+
+import pytest
+
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.helpers import (available_screen_size, document_hidden,
+ is_fullscreen, screen_size)
+
+
+def set_window_rect(session, rect):
+ return session.transport.send(
+ "POST", "session/{session_id}/window/rect".format(**vars(session)),
+ rect)
+
+
+def test_null_parameter_value(session, http):
+ path = "/session/{session_id}/window/rect".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = set_window_rect(session, {})
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ response = set_window_rect(session, {"width": 750, "height": 700})
+ value = assert_success(response)
+ assert value["width"] == 750
+ assert value["height"] == 700
+
+
+@pytest.mark.parametrize("rect", [
+ {"width": "a"},
+ {"height": "b"},
+ {"width": "a", "height": "b"},
+ {"x": "a"},
+ {"y": "b"},
+ {"x": "a", "y": "b"},
+ {"width": "a", "height": "b", "x": "a", "y": "b"},
+
+ {"width": True},
+ {"height": False},
+ {"width": True, "height": False},
+ {"x": True},
+ {"y": False},
+ {"x": True, "y": False},
+ {"width": True, "height": False, "x": True, "y": False},
+
+ {"width": []},
+ {"height": []},
+ {"width": [], "height": []},
+ {"x": []},
+ {"y": []},
+ {"x": [], "y": []},
+ {"width": [], "height": [], "x": [], "y": []},
+
+ {"height": {}},
+ {"width": {}},
+ {"height": {}, "width": {}},
+ {"x": {}},
+ {"y": {}},
+ {"x": {}, "y": {}},
+ {"width": {}, "height": {}, "x": {}, "y": {}},
+])
+def test_invalid_types(session, rect):
+ response = set_window_rect(session, rect)
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("rect", [
+ {"width": -1},
+ {"height": -2},
+ {"width": -1, "height": -2},
+])
+def test_out_of_bounds(session, rect):
+ response = set_window_rect(session, rect)
+ assert_error(response, "invalid argument")
+
+
+def test_width_height_floats(session):
+ response = set_window_rect(session, {"width": 750.5, "height": 700})
+ value = assert_success(response)
+ assert value["width"] == 750
+ assert value["height"] == 700
+
+ response = set_window_rect(session, {"width": 750, "height": 700.5})
+ value = assert_success(response)
+ assert value["width"] == 750
+ assert value["height"] == 700
+
+
+def test_x_y_floats(session):
+ response = set_window_rect(session, {"x": 0.5, "y": 420})
+ value = assert_success(response)
+ assert value["x"] == 0
+ assert value["y"] == 420
+
+ response = set_window_rect(session, {"x": 100, "y": 450.5})
+ value = assert_success(response)
+ assert value["x"] == 100
+ assert value["y"] == 450
+
+
+@pytest.mark.parametrize("rect", [
+ {},
+
+ {"width": None},
+ {"height": None},
+ {"width": None, "height": None},
+
+ {"x": None},
+ {"y": None},
+ {"x": None, "y": None},
+
+ {"width": None, "x": None},
+ {"width": None, "y": None},
+ {"height": None, "x": None},
+    {"height": None, "y": None},
+
+ {"width": None, "height": None, "x": None, "y": None},
+
+ {"width": 200},
+ {"height": 200},
+ {"x": 200},
+ {"y": 200},
+ {"width": 200, "x": 200},
+ {"height": 200, "x": 200},
+ {"width": 200, "y": 200},
+ {"height": 200, "y": 200},
+])
+def test_no_change(session, rect):
+ original = session.window.rect
+ response = set_window_rect(session, rect)
+ assert_success(response, original)
+
+
+def test_fully_exit_fullscreen(session):
+ session.window.fullscreen()
+ assert is_fullscreen(session)
+
+ response = set_window_rect(session, {"width": 600, "height": 400})
+ value = assert_success(response)
+ assert value["width"] == 600
+ assert value["height"] == 400
+
+ assert not is_fullscreen(session)
+
+
+def test_restore_from_minimized(session):
+ session.window.minimize()
+ assert document_hidden(session)
+
+ response = set_window_rect(session, {"width": 750, "height": 700})
+ value = assert_success(response)
+ assert value["width"] == 750
+ assert value["height"] == 700
+
+ assert not document_hidden(session)
+
+
+def test_restore_from_maximized(session):
+ original_size = session.window.size
+ session.window.maximize()
+ assert session.window.size != original_size
+
+ response = set_window_rect(session, {"width": 750, "height": 700})
+ value = assert_success(response)
+ assert value["width"] == 750
+ assert value["height"] == 700
+
+
+def test_height_width(session):
+ # The window position might be auto-adjusted by the browser
+ # if it exceeds the lower right corner. As such ensure that
+ # there is enough space left so no window move will occur.
+ session.window.position = (50, 50)
+
+ original = session.window.rect
+ screen_width, screen_height = screen_size(session)
+
+ response = set_window_rect(session, {
+ "width": screen_width - 100,
+ "height": screen_height - 100
+ })
+ assert_success(response, {
+ "x": original["x"],
+ "y": original["y"],
+ "width": screen_width - 100,
+ "height": screen_height - 100,
+ })
+
+
+def test_height_width_smaller_than_minimum_browser_size(session):
+ original = session.window.rect
+
+ response = set_window_rect(session, {"width": 10, "height": 10})
+ rect = assert_success(response)
+ assert rect["width"] < original["width"]
+ assert rect["width"] > 10
+ assert rect["height"] < original["height"]
+ assert rect["height"] > 10
+
+
+def test_height_width_larger_than_max(session):
+ screen_width, screen_height = screen_size(session)
+ avail_width, avail_height = available_screen_size(session)
+
+ response = set_window_rect(session, {
+ "width": screen_width + 100,
+ "height": screen_height + 100
+ })
+ rect = assert_success(response)
+ assert rect["width"] >= avail_width
+ assert rect["height"] >= avail_height
+
+
+def test_height_width_as_current(session):
+ original = session.window.rect
+
+ response = set_window_rect(session, {
+ "width": original["width"],
+ "height": original["height"]
+ })
+ assert_success(response, {
+ "x": original["x"],
+ "y": original["y"],
+ "width": original["width"],
+ "height": original["height"]
+ })
+
+
+def test_height_as_current(session):
+ original = session.window.rect
+
+ response = set_window_rect(session, {
+ "width": original["width"] + 10,
+ "height": original["height"]
+ })
+ assert_success(response, {
+ "x": original["x"],
+ "y": original["y"],
+ "width": original["width"] + 10,
+ "height": original["height"]
+ })
+
+
+def test_width_as_current(session):
+ original = session.window.rect
+
+ response = set_window_rect(session, {
+ "width": original["width"],
+ "height": original["height"] + 10
+ })
+ assert_success(response, {
+ "x": original["x"],
+ "y": original["y"],
+ "width": original["width"],
+ "height": original["height"] + 10
+ })
+
+
+def test_x_y(session):
+ original = session.window.rect
+ response = set_window_rect(session, {
+ "x": original["x"] + 10,
+ "y": original["y"] + 10
+ })
+ assert_success(response, {
+ "x": original["x"] + 10,
+ "y": original["y"] + 10,
+ "width": original["width"],
+ "height": original["height"]
+ })
+
+
+def test_negative_x_y(session):
+ original = session.window.rect
+
+ response = set_window_rect(session, {"x": - 8, "y": - 8})
+
+ os = session.capabilities["platformName"]
+ # certain WMs prohibit windows from being moved off-screen
+ if os == "linux":
+ rect = assert_success(response)
+ assert rect["x"] <= 0
+ assert rect["y"] <= 0
+ assert rect["width"] == original["width"]
+ assert rect["height"] == original["height"]
+
+ # On macOS, windows can only be moved off the screen on the
+ # horizontal axis. The system menu bar also blocks windows from
+ # being moved to (0,0).
+ elif os == "mac":
+ value = assert_success(response)
+
+ # `screen.availTop` is not standardized but all browsers we care
+ # about on MacOS implement the CSSOM View mode `Screen` interface.
+ avail_top = session.execute_script("return window.screen.availTop;")
+
+ assert value == {"x": -8,
+ "y": avail_top,
+ "width": original["width"],
+ "height": original["height"]}
+
+ # It turns out that Windows is the only platform on which the
+ # window can be reliably positioned off-screen.
+ elif os == "windows":
+ assert_success(response, {"x": -8,
+ "y": -8,
+ "width": original["width"],
+ "height": original["height"]})
+
+
+def test_x_y_as_current(session):
+ original = session.window.rect
+
+ response = set_window_rect(session, {
+ "x": original["x"],
+ "y": original["y"]
+ })
+ assert_success(response, {
+ "x": original["x"],
+ "y": original["y"],
+ "width": original["width"],
+ "height": original["height"]
+ })
+
+
+def test_x_as_current(session):
+ original = session.window.rect
+
+ response = set_window_rect(session, {
+ "x": original["x"],
+ "y": original["y"] + 10
+ })
+ assert_success(response, {
+ "x": original["x"],
+ "y": original["y"] + 10,
+ "width": original["width"],
+ "height": original["height"]
+ })
+
+
+def test_y_as_current(session):
+ original = session.window.rect
+
+ response = set_window_rect(session, {
+ "x": original["x"] + 10,
+ "y": original["y"]
+ })
+ assert_success(response, {
+ "x": original["x"] + 10,
+ "y": original["y"],
+ "width": original["width"],
+ "height": original["height"]
+ })
+
+
+"""
+TODO(ato):
+
+ Disable test because the while statements are wrong.
+ To fix this properly we need to write an explicit wait utility.
+
+def test_resize_by_script(session):
+ # setting the window size by JS is asynchronous
+ # so we poll waiting for the results
+
+ size0 = session.window.size
+
+ session.execute_script("window.resizeTo(700, 800)")
+ size1 = session.window.size
+ while size0 == size1:
+ size1 = session.window.size
+ assert size1 == (700, 800)
+
+ session.execute_script("window.resizeTo(800, 900)")
+ size2 = session.window.size
+ while size1 == size2:
+ size2 = session.window.size
+ assert size2 == (800, 900)
+ assert size2 == {"width": 200, "height": 100}
+"""
+
+
+def test_payload(session):
+ response = set_window_rect(session, {"x": 400, "y": 400})
+
+ assert response.status == 200
+ assert isinstance(response.body["value"], dict)
+ value = response.body["value"]
+ assert "width" in value
+ assert "height" in value
+ assert "x" in value
+ assert "y" in value
+ assert isinstance(value["width"], int)
+ assert isinstance(value["height"], int)
+ assert isinstance(value["x"], int)
+ assert isinstance(value["y"], int)
diff --git a/testing/web-platform/tests/webdriver/tests/set_window_rect/user_prompts.py b/testing/web-platform/tests/webdriver/tests/set_window_rect/user_prompts.py
new file mode 100644
index 0000000000..908a9d920f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/set_window_rect/user_prompts.py
@@ -0,0 +1,121 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
+
+
def set_window_rect(session, rect):
    """POST the "Set Window Rect" command with *rect* as the JSON payload."""
    endpoint = "session/{session_id}/window/rect".format(**vars(session))
    return session.transport.send("POST", endpoint, rect)
+
+
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
    """Check that an open user prompt is auto-closed and the command succeeds."""
    def check_user_prompt_closed_without_exception(dialog_type, retval):
        original_rect = session.window.rect

        create_dialog(dialog_type, text=dialog_type)

        # The command must succeed despite the open prompt.
        response = set_window_rect(session, {
            "x": original_rect["x"] + 10, "y": original_rect["y"] + 10})
        assert_success(response)

        # The prompt must have been closed with the expected return value.
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        # The command must have taken effect on the window rect.
        assert session.window.rect != original_rect

    return check_user_prompt_closed_without_exception
+
+
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
    """Check that the prompt is closed but the command fails with
    "unexpected alert open" and has no effect on the window rect."""
    def check_user_prompt_closed_with_exception(dialog_type, retval):
        original_rect = session.window.rect

        create_dialog(dialog_type, text=dialog_type)

        response = set_window_rect(session, {
            "x": original_rect["x"] + 10, "y": original_rect["y"] + 10})
        assert_error(response, "unexpected alert open")

        # The prompt was still handled (closed) with the expected value.
        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        # The failed command must not have moved the window.
        assert session.window.rect == original_rect

    return check_user_prompt_closed_with_exception
+
+
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
    """Check that the prompt stays open, the command fails with
    "unexpected alert open", and the window rect is unchanged."""
    def check_user_prompt_not_closed_but_exception(dialog_type):
        original_rect = session.window.rect

        create_dialog(dialog_type, text=dialog_type)

        response = set_window_rect(session, {
            "x": original_rect["x"] + 10, "y": original_rect["y"] + 10})
        assert_error(response, "unexpected alert open")

        # The prompt must still be present; dismiss it for cleanup.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

        assert session.window.rect == original_rect

    return check_user_prompt_not_closed_but_exception
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
    # "accept": the prompt is accepted silently and the command succeeds.
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    # "accept and notify": the prompt is accepted, but the command errors.
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
    # "dismiss": the prompt is dismissed silently and the command succeeds.
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    # "dismiss and notify": the prompt is dismissed, but the command errors.
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    # "ignore": the prompt is left open and the command errors.
    check_user_prompt_not_closed_but_exception(dialog_type)
+
+
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    # No capability set: default behaves like "dismiss and notify".
    check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/status/__init__.py b/testing/web-platform/tests/webdriver/tests/status/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/status/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/status/status.py b/testing/web-platform/tests/webdriver/tests/status/status.py
new file mode 100644
index 0000000000..8c7ae22a67
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/status/status.py
@@ -0,0 +1,33 @@
+import json
+
+from tests.support.asserts import assert_success
+
+
def get_status(session):
    """Issue the session-less WebDriver "Status" command."""
    transport = session.transport
    return transport.send("GET", "/status")
+
+
def test_get_status_no_session(http):
    # GET /status must also work without any active session.
    with http.get("/status") as response:
        # GET /status should never return an error
        assert response.status == 200

        parsed_obj = json.loads(response.read().decode("utf-8"))
        value = parsed_obj["value"]

        # "ready" must be a boolean, "message" an implementation string.
        assert value["ready"] in [True, False]
        assert isinstance(value["message"], str)
+
+
def test_status_with_session_running_on_endpoint_node(session):
    # While a session is active, the node must report itself not ready.
    response = get_status(session)
    value = assert_success(response)
    assert value["ready"] is False
    assert "message" in value

    session.end()

    # After the session has ended it must report ready again.
    response = get_status(session)
    value = assert_success(response)
    assert value["ready"] is True
    assert "message" in value
diff --git a/testing/web-platform/tests/webdriver/tests/support/__init__.py b/testing/web-platform/tests/webdriver/tests/support/__init__.py
new file mode 100644
index 0000000000..a7b33cc15f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/__init__.py
@@ -0,0 +1,14 @@
+import sys
+
+from .merge_dictionaries import merge_dictionaries
+
# Generic platform name ("linux", "windows", "mac") for the current OS,
# or None when sys.platform is not in the mapping below.
platform_name = {
    # From Python version 3.3: On Linux, sys.platform doesn't contain the major version anymore.
    # It is always 'linux'. See
    # https://docs.python.org/3/library/sys.html#sys.platform
    "linux": "linux",
    "linux2": "linux",
    "win32": "windows",
    "cygwin": "windows",
    "darwin": "mac"
}.get(sys.platform)
diff --git a/testing/web-platform/tests/webdriver/tests/support/asserts.py b/testing/web-platform/tests/webdriver/tests/support/asserts.py
new file mode 100644
index 0000000000..231d8ab1ca
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/asserts.py
@@ -0,0 +1,224 @@
+import imghdr
+from base64 import decodebytes
+
+from webdriver import Element, NoSuchAlertException, WebDriverException
+
# WebDriver specification ID: dfn-error-response-data
# Maps each JSON error code string to the HTTP status code an error
# response carrying it must use.
errors = {
    "detached shadow root": 404,
    "element click intercepted": 400,
    "element not selectable": 400,
    "element not interactable": 400,
    "insecure certificate": 400,
    "invalid argument": 400,
    "invalid cookie domain": 400,
    "invalid coordinates": 400,
    "invalid element state": 400,
    "invalid selector": 400,
    "invalid session id": 404,
    "javascript error": 500,
    "move target out of bounds": 500,
    "no such alert": 404,
    "no such cookie": 404,
    "no such element": 404,
    "no such frame": 404,
    "no such shadow root": 404,
    "no such window": 404,
    "script timeout": 500,
    "session not created": 500,
    "stale element reference": 404,
    "timeout": 500,
    "unable to set cookie": 500,
    "unable to capture screen": 500,
    "unexpected alert open": 500,
    "unknown command": 404,
    "unknown error": 500,
    "unknown method": 405,
    "unsupported operation": 500,
}
+
+
def assert_error(response, error_code):
    """Assert that *response* is a valid error response for *error_code*.

    Checks the HTTP status against the `errors` table, the shape of the
    JSON error payload, and the mandatory response headers
    (WebDriver `dfn-send-an-error`).

    :param response: ``webdriver.Response`` instance.
    :param error_code: String value of the expected error code.
    """
    assert response.status == errors[error_code]
    assert "value" in response.body

    value = response.body["value"]
    assert value["error"] == error_code
    assert isinstance(value["message"], str)
    assert isinstance(value["stacktrace"], str)

    assert_response_headers(response.headers)
+
+
def assert_success(response, value=None):
    """
    Verify that the provided webdriver.Response instance described
    a valid success response as defined by `dfn-send-a-response` and
    the provided response value.

    :param response: ``webdriver.Response`` instance.
    :param value: Expected value of the response body, if any.
    :returns: The response body's "value" field (None when absent).
    """
    assert response.status == 200, str(response.error)

    if value is not None:
        assert response.body["value"] == value

    assert_response_headers(response.headers)
    return response.body.get("value")
+
+
def assert_response_headers(headers):
    """Assert the mandatory WebDriver response headers.

    :param headers: dict-like object with header data
    """
    required = {
        "cache-control": "no-cache",
        "content-type": "application/json; charset=utf-8",
    }
    for name, expected in required.items():
        assert name in headers
        assert headers[name] == expected
+
+
def assert_dialog_handled(session, expected_text, expected_retval):
    """Assert the prompt with *expected_text* was closed and that
    window.dialog_return_value (set by the create_dialog fixture's script)
    equals *expected_retval*."""
    # If there were any existing dialogs prior to the creation of this
    # fixture's dialog, then the "Get Alert Text" command will return
    # successfully. In that case, the text must be different than that
    # of this fixture's dialog.
    try:
        assert session.alert.text != expected_text, (
            "User prompt with text '{}' was not handled.".format(expected_text))

    except NoSuchAlertException:
        # If dialog has been closed and no other one is open, check its return value
        prompt_retval = session.execute_script(" return window.dialog_return_value;")
        assert prompt_retval == expected_retval
+
+
def assert_files_uploaded(session, element, files):
    """Assert that the file input *element* holds exactly *files*,
    comparing both file names and file contents."""

    # Read the content of the uploaded file at file_index via FileReader.
    def get_file_contents(file_index):
        return session.execute_async_script("""
            let files = arguments[0].files;
            let index = arguments[1];
            let resolve = arguments[2];

            var reader = new FileReader();
            reader.onload = function(event) {
              resolve(reader.result);
            };
            reader.readAsText(files[index]);
            """, (element, file_index))

    # Collect the names of all files currently attached to the element.
    def get_uploaded_file_names():
        return session.execute_script("""
            let fileList = arguments[0].files;
            let files = [];

            for (var i = 0; i < fileList.length; i++) {
              files.push(fileList[i].name);
            }

            return files;
            """, args=(element,))

    # files are py.path-like objects (have .basename and .read()).
    expected_file_names = [str(f.basename) for f in files]
    assert get_uploaded_file_names() == expected_file_names

    for index, f in enumerate(files):
        assert get_file_contents(index) == f.read()
+
+
def assert_is_active_element(session, element):
    """Verify that element reference is the active element."""
    from_js = session.execute_script("return document.activeElement")

    # When no element is expected, the script must also yield None.
    if element is None:
        assert from_js is None
    else:
        assert_same_element(session, element, from_js)
+
+
def assert_same_element(session, a, b):
    """Verify that two element references describe the same element.

    Accepts either serialized element dicts (keyed by Element.identifier)
    or ``webdriver.Element`` instances; raises AssertionError otherwise.
    """
    # Extract the element id from the actual value.
    if isinstance(a, dict):
        assert Element.identifier in a, "Actual value does not describe an element"
        a_id = a[Element.identifier]
    elif isinstance(a, Element):
        a_id = a.id
    else:
        raise AssertionError("Actual value is not a dictionary or web element")

    # Extract the element id from the expected value.
    if isinstance(b, dict):
        assert Element.identifier in b, "Expected value does not describe an element"
        b_id = b[Element.identifier]
    elif isinstance(b, Element):
        b_id = b.id
    else:
        raise AssertionError("Expected value is not a dictionary or web element")

    if a_id == b_id:
        return

    message = ("Expected element references to describe the same element, " +
               "but they did not.")

    # Attempt to provide more information, accounting for possible errors such
    # as stale element references or not visible elements.
    try:
        a_markup = session.execute_script("return arguments[0].outerHTML;", args=(a,))
        b_markup = session.execute_script("return arguments[0].outerHTML;", args=(b,))
        message += " Actual: `%s`. Expected: `%s`." % (a_markup, b_markup)
    except WebDriverException:
        pass

    raise AssertionError(message)
+
+
def assert_in_events(session, expected_events):
    """Assert every entry of *expected_events* was recorded in window.events."""
    recorded = session.execute_script("return window.events")
    for event in expected_events:
        assert event in recorded
+
+
def assert_events_equal(session, expected_events):
    """Assert window.events matches *expected_events* exactly, order included."""
    recorded = session.execute_script("return window.events")
    assert recorded == expected_events
+
+
def assert_element_has_focus(target_element):
    """Assert that *target_element* is the document's active element."""
    session = target_element.session

    active_element = session.execute_script("return document.activeElement")
    # Tag names are only read to build a readable failure message.
    active_tag = active_element.property("localName")
    target_tag = target_element.property("localName")

    assert active_element == target_element, (
        "Focussed element is <%s>, not <%s>" % (active_tag, target_tag))
+
+
def assert_move_to_coordinates(point, target, events):
    # Intermediate "mousemove" events may fire anywhere along the way, so
    # only events of other types (e.g. mousedown/mouseup/click) are required
    # to land exactly on the given point and target.
    for e in events:
        if e["type"] != "mousemove":
            assert e["pageX"] == point["x"]
            assert e["pageY"] == point["y"]
            assert e["target"] == target
+
+
def assert_png(screenshot):
    """Test that screenshot is a Base64 encoded PNG file, or a bytestring representing a PNG.

    Returns the bytestring for the PNG, if the assert passes
    """
    if isinstance(screenshot, str):
        image = decodebytes(screenshot.encode())
    else:
        image = screenshot
    # Check the fixed 8-byte PNG signature directly: the imghdr module used
    # previously was deprecated in Python 3.11 and removed in 3.13.
    signature = b"\x89PNG\r\n\x1a\n"
    assert image[:8] == signature, (
        "Expected image to be PNG, but its signature was {!r}".format(image[:8]))
    return image
diff --git a/testing/web-platform/tests/webdriver/tests/support/defaults.py b/testing/web-platform/tests/webdriver/tests/support/defaults.py
new file mode 100644
index 0000000000..64ee18b6c1
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/defaults.py
@@ -0,0 +1,6 @@
# Timeout defaults restored by cleanup helpers between tests.
# Presumably seconds, as consumed by the client's Timeouts API — confirm
# against the webdriver client implementation.
SCRIPT_TIMEOUT = 30
PAGE_LOAD_TIMEOUT = 300
IMPLICIT_WAIT_TIMEOUT = 0

# Default window position (x, y) and size (width, height) enforced by the
# session fixtures for a reproducible starting state.
WINDOW_POSITION = (100, 100)
WINDOW_SIZE = (800, 600)
diff --git a/testing/web-platform/tests/webdriver/tests/support/fixtures.py b/testing/web-platform/tests/webdriver/tests/support/fixtures.py
new file mode 100644
index 0000000000..e63c0d01d2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/fixtures.py
@@ -0,0 +1,256 @@
+import copy
+import json
+import os
+
+import asyncio
+import pytest
+import webdriver
+
+from urllib.parse import urlunsplit
+
+from tests.support import defaults
+from tests.support.helpers import cleanup_session, deep_update
+from tests.support.inline import build_inline
+from tests.support.http_request import HTTPRequest
+
+
+# The webdriver session can outlive a pytest session
+_current_session = None
+
+# The event loop needs to outlive the webdriver session
+_event_loop = None
+
+_custom_session = False
+
+
def pytest_configure(config):
    """Pytest hook: register the custom "capabilities" marker."""
    # register the capabilities marker
    config.addinivalue_line(
        "markers",
        "capabilities: mark test to use capabilities"
    )
+
+
def pytest_sessionfinish(session, exitstatus):
    """Pytest hook: end any leftover WebDriver session after the test run."""
    # Cleanup at the end of a test run
    global _current_session

    if _current_session is not None:
        _current_session.end()
        _current_session = None
+
+
@pytest.fixture
def capabilities():
    """Default capabilities to use for a new WebDriver session.

    Parametrized per test from the "capabilities" marker by
    pytest_generate_tests.
    """
    return {}
+
+
def pytest_generate_tests(metafunc):
    """Pytest hook: feed the "capabilities" marker args into the fixture."""
    if "capabilities" in metafunc.fixturenames:
        marker = metafunc.definition.get_closest_marker(name="capabilities")
        if marker:
            metafunc.parametrize("capabilities", marker.args, ids=None)
+
+
@pytest.fixture(scope="session")
def event_loop():
    """Change event_loop fixture to global.

    The loop is created lazily and cached in a module global so it can
    outlive the webdriver session.
    """
    global _event_loop

    if _event_loop is None:
        _event_loop = asyncio.get_event_loop_policy().new_event_loop()
    return _event_loop
+
+
@pytest.fixture
def http(configuration):
    """HTTPRequest helper bound to the WebDriver server's host and port."""
    return HTTPRequest(configuration["host"], configuration["port"])
+
+
@pytest.fixture(scope="session")
def full_configuration():
    """Get test configuration information. Keys are:

    host - WebDriver server host.
    port - WebDriver server port.
    capabilities - Capabilities passed when creating the WebDriver session
    webdriver - Dict with keys `binary`: path to webdriver binary, and
        `args`: Additional command line arguments passed to the webdriver
        binary. This doesn't include all the required arguments e.g. the
        port.
    wptserve - Configuration of the wptserve servers."""

    # NOTE(review): assumes the harness always sets WDSPEC_CONFIG_FILE;
    # open() raises if the variable is unset — confirm with the runner.
    with open(os.environ.get("WDSPEC_CONFIG_FILE"), "r") as f:
        return json.load(f)
+
+
@pytest.fixture(scope="session")
def server_config(full_configuration):
    """The wptserve part of the test configuration."""
    return full_configuration["wptserve"]
+
+
@pytest.fixture(scope="session")
def configuration(full_configuration):
    """Configuration minus server config.

    This makes logging easier to read."""

    # Shallow copy suffices: only the top-level "wptserve" key is dropped.
    config = full_configuration.copy()
    del config["wptserve"]

    return config
+
+
async def reset_current_session_if_necessary(caps):
    """End the cached session if its capabilities don't match *caps*."""
    global _current_session

    # If there is a session with different requested capabilities active than
    # the one we would like to create, end it now.
    if _current_session is not None:
        if not _current_session.match(caps):
            is_bidi = isinstance(_current_session, webdriver.BidiSession)
            # BiDi sessions end over the websocket and must be awaited.
            if is_bidi:
                await _current_session.end()
            else:
                _current_session.end()
            _current_session = None
+
+
@pytest.fixture(scope="function")
async def session(capabilities, configuration):
    """Create and start a session for a test that does not itself test session creation.

    By default the session will stay open after each test, but we always try to start a
    new one and assume that if that fails there is already a valid session. This makes it
    possible to recover from some errors that might leave the session in a bad state, but
    does not demand that we start a new session per test.
    """
    global _current_session

    # Update configuration capabilities with custom ones from the
    # capabilities fixture, which can be set by tests
    caps = copy.deepcopy(configuration["capabilities"])
    deep_update(caps, capabilities)
    caps = {"alwaysMatch": caps}

    await reset_current_session_if_necessary(caps)

    if _current_session is None:
        _current_session = webdriver.Session(
            configuration["host"],
            configuration["port"],
            capabilities=caps)

    # Always (re)start; per the docstring, a failure here implies a valid
    # session is already running.
    _current_session.start()

    # Enforce a fixed default window size and position
    if _current_session.capabilities.get("setWindowRect"):
        _current_session.window.size = defaults.WINDOW_SIZE
        _current_session.window.position = defaults.WINDOW_POSITION

    yield _current_session

    cleanup_session(_current_session)
+
+
@pytest.fixture(scope="function")
async def bidi_session(capabilities, configuration):
    """Create and start a bidi session.

    Can be used for a test that does not itself test bidi session creation.

    By default the session will stay open after each test, but we always try to start a
    new one and assume that if that fails there is already a valid session. This makes it
    possible to recover from some errors that might leave the session in a bad state, but
    does not demand that we start a new session per test.
    """
    global _current_session

    # Update configuration capabilities with custom ones from the
    # capabilities fixture, which can be set by tests
    caps = copy.deepcopy(configuration["capabilities"])
    # BiDi needs the webSocketUrl capability to obtain the websocket end point.
    caps.update({"webSocketUrl": True})
    deep_update(caps, capabilities)
    caps = {"alwaysMatch": caps}

    await reset_current_session_if_necessary(caps)

    if _current_session is None:
        _current_session = webdriver.Session(
            configuration["host"],
            configuration["port"],
            capabilities=caps,
            enable_bidi=True)

    _current_session.start()
    await _current_session.bidi_session.start()

    # Enforce a fixed default window size and position
    if _current_session.capabilities.get("setWindowRect"):
        _current_session.window.size = defaults.WINDOW_SIZE
        _current_session.window.position = defaults.WINDOW_POSITION

    yield _current_session.bidi_session

    # Only the BiDi transport is ended here; the HTTP session is kept for reuse.
    await _current_session.bidi_session.end()
    cleanup_session(_current_session)
+
+
@pytest.fixture(scope="function")
def current_session():
    """The cached module-global session; None if none has been created."""
    return _current_session
+
+
@pytest.fixture
def url(server_config):
    """Build an absolute wptserve URL for a path on a given (sub)domain."""
    def url(path, protocol="http", domain="", subdomain="", query="", fragment=""):
        domain = server_config["domains"][domain][subdomain]
        # Use the first port configured for the protocol.
        port = server_config["ports"][protocol][0]
        host = "{0}:{1}".format(domain, port)
        return urlunsplit((protocol, host, path, query, fragment))

    return url
+
+
@pytest.fixture
def inline(url):
    """Take a source extract and produces well-formed documents.

    Based on the desired document type, the extract is embedded with
    predefined boilerplate in order to produce well-formed documents.
    The media type and character set may also be individually configured.

    This helper function originally used data URLs, but since these
    are not universally supported (or indeed standardised!) across
    browsers, it now delegates the serving of the document to wptserve.
    This file also acts as a wptserve handler (see the main function
    below) which configures the HTTP response using query parameters.

    This function returns a URL to the wptserve handler, which in turn
    will serve an HTTP response with the requested source extract
    inlined in a well-formed document, and the Content-Type header
    optionally configured using the desired media type and character set.

    Any additional keyword arguments are passed on to the build_url
    function, which comes from the url fixture.
    """
    def inline(src, **kwargs):
        # Delegate URL construction to build_inline with the url fixture.
        return build_inline(url, src, **kwargs)

    return inline
+
+
@pytest.fixture
def iframe(inline):
    """Inline document extract as the source document of an <iframe>."""
    def iframe(src, **kwargs):
        # The nested document itself is served via the inline fixture.
        return "<iframe src='{}'></iframe>".format(inline(src, **kwargs))

    return iframe
+
+
@pytest.fixture
async def top_context(bidi_session):
    """First top-level browsing context of the BiDi context tree."""
    contexts = await bidi_session.browsing_context.get_tree()
    return contexts[0]
diff --git a/testing/web-platform/tests/webdriver/tests/support/fixtures_bidi.py b/testing/web-platform/tests/webdriver/tests/support/fixtures_bidi.py
new file mode 100644
index 0000000000..db0903017c
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/fixtures_bidi.py
@@ -0,0 +1,116 @@
+import asyncio
+from typing import Any, Mapping
+
+import pytest
+import webdriver
+from webdriver.bidi.error import InvalidArgumentException, NoSuchFrameException
+from webdriver.bidi.modules.script import ContextTarget
+
+
@pytest.fixture
async def subscribe_events(bidi_session):
    """Subscribe to BiDi events and auto-unsubscribe on fixture teardown."""
    subscriptions = []

    async def subscribe_events(events, contexts=None):
        await bidi_session.session.subscribe(events=events, contexts=contexts)
        subscriptions.append((events, contexts))

    yield subscribe_events

    # Unsubscribe in reverse order; tolerate subscriptions whose target
    # (e.g. a closed context) has already gone away.
    for events, contexts in reversed(subscriptions):
        try:
            await bidi_session.session.unsubscribe(
                events=events, contexts=contexts
            )
        except (InvalidArgumentException, NoSuchFrameException):
            pass
+
+
@pytest.fixture
async def new_tab(bidi_session):
    """Open and focus a new tab to run the test in a foreground tab."""
    new_tab = await bidi_session.browsing_context.create(type_hint='tab')
    yield new_tab
    # Close the tab again on teardown so it does not leak into other tests.
    await bidi_session.browsing_context.close(context=new_tab["context"])
+
+
@pytest.fixture
def send_blocking_command(bidi_session):
    """Send a blocking command that awaits until the BiDi response has been received."""
    async def send_blocking_command(command: str, params: Mapping[str, Any]) -> Mapping[str, Any]:
        # send_command resolves to a future; await that for the real response.
        future_response = await bidi_session.send_command(command, params)
        return await future_response
    return send_blocking_command
+
+
@pytest.fixture
def wait_for_event(bidi_session, event_loop):
    """Wait until the BiDi session emits an event and resolve the event data."""
    def wait_for_event(event_name: str):
        future = event_loop.create_future()

        async def on_event(method, data):
            # One-shot listener: detach before resolving the future.
            remove_listener()
            future.set_result(data)

        remove_listener = bidi_session.add_event_listener(event_name, on_event)

        return future
    return wait_for_event
+
+
@pytest.fixture
def current_time(bidi_session, top_context):
    """Get the current time stamp in ms from the remote end.

    This is required especially when tests are run on different devices like
    for Android, where it's not guaranteed that both machines are in sync.
    """
    async def _():
        # Evaluate Date.now() in the remote end's top-level context.
        result = await bidi_session.script.evaluate(
            expression="Date.now()",
            target=ContextTarget(top_context["context"]),
            await_promise=True)
        return result["value"]

    return _
+
+
@pytest.fixture
def add_and_remove_iframe(bidi_session, inline):
    """Create a frame, wait for load, and remove it.

    Return the frame's context id, which allows to test for invalid
    browsing context references.
    """
    async def closed_frame(context, url=inline("test-frame")):
        # Snapshot the child contexts so the new frame can be identified.
        initial_contexts = await bidi_session.browsing_context.get_tree(root=context["context"])
        resp = await bidi_session.script.call_function(
            function_declaration="""(url) => {
                const iframe = document.createElement("iframe");
                // Once we're confident implementations support returning the iframe, just
                // return that directly. For now generate a unique id to use as a handle.
                const id = `testframe-${Math.random()}`;
                iframe.id = id;
                iframe.src = url;
                document.documentElement.lastElementChild.append(iframe);
                return new Promise(resolve => iframe.onload = () => resolve(id))
            }""",
            target={"context": context["context"]},
            await_promise=True)
        iframe_dom_id = resp["value"]

        # The new frame's context id is the one not present in the snapshot.
        new_contexts = await bidi_session.browsing_context.get_tree(root=context["context"])
        added_contexts = ({item["context"] for item in new_contexts[0]["children"]} -
                          {item["context"] for item in initial_contexts[0]["children"]})
        assert len(added_contexts) == 1
        frame_id = added_contexts.pop()

        # Remove the frame again so the returned context id is stale.
        await bidi_session.script.evaluate(
            expression=f"document.getElementById('{iframe_dom_id}').remove()",
            target={"context": context["context"]},
            await_promise=False)

        return frame_id
    return closed_frame
diff --git a/testing/web-platform/tests/webdriver/tests/support/fixtures_http.py b/testing/web-platform/tests/webdriver/tests/support/fixtures_http.py
new file mode 100644
index 0000000000..42a74ba41f
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/fixtures_http.py
@@ -0,0 +1,176 @@
+import pytest
+from webdriver.error import NoSuchAlertException
+
+from tests.support.sync import Poll
+
+
@pytest.fixture
def add_event_listeners(session):
    """Register listeners for tracked events on element."""
    def add_event_listeners(element, tracked_events):
        # Fired event types are appended to window.events for later assertions.
        element.session.execute_script("""
            let element = arguments[0];
            let trackedEvents = arguments[1];

            if (!("events" in window)) {
              window.events = [];
            }

            for (var i = 0; i < trackedEvents.length; i++) {
              element.addEventListener(trackedEvents[i], function (event) {
                window.events.push(event.type);
              });
            }
            """, args=(element, tracked_events))
    return add_event_listeners
+
+
@pytest.fixture
def closed_frame(session, url):
    """Create a frame and remove it after switching to it.

    The removed frame will be kept selected, which allows to test for invalid
    browsing context references.
    """
    original_handle = session.window_handle
    new_handle = session.new_window()

    session.window_handle = new_handle

    session.url = url("/webdriver/tests/support/html/frames.html")

    # Descend two levels into the nested frame structure.
    subframe = session.find.css("#sub-frame", all=False)
    session.switch_frame(subframe)

    deleteframe = session.find.css("#delete-frame", all=False)
    session.switch_frame(deleteframe)

    # Clicking this button removes the frame we are currently inside.
    button = session.find.css("#remove-parent", all=False)
    button.click()

    yield

    session.window.close()
    assert new_handle not in session.handles, "Unable to close window {}".format(new_handle)

    session.window_handle = original_handle
+
+
@pytest.fixture
def closed_window(session, inline):
    """Create a window and close it immediately.

    The window handle will be kept selected, which allows to test for invalid
    top-level browsing context references.

    Yields (original window handle, element from the closed window).
    """
    original_handle = session.window_handle
    new_handle = session.new_window()

    session.window_handle = new_handle
    session.url = inline("<input id='a' value='b'>")
    # Grab an element before closing, so tests get a stale reference too.
    element = session.find.css("input", all=False)

    session.window.close()
    assert new_handle not in session.handles, "Unable to close window {}".format(new_handle)

    yield (original_handle, element)

    session.window_handle = original_handle
+
+
@pytest.fixture
def create_cookie(session, url):
    """Create a cookie."""
    def create_cookie(name, value, **kwargs):
        # Navigate first so the cookie is set against the intended path.
        if kwargs.get("path", None) is not None:
            session.url = url(kwargs["path"])

        session.set_cookie(name, value, **kwargs)
        # Return the stored cookie so the caller can verify it.
        return session.cookies(name)

    return create_cookie
+
+
@pytest.fixture
def create_dialog(session):
    """Create a dialog (one of "alert", "prompt", or "confirm").

    Also it provides a function to validate that the dialog has been "handled"
    (either accepted or dismissed) by returning some value.
    """
    def create_dialog(dialog_type, text=None):
        assert dialog_type in ("alert", "confirm", "prompt"), (
            "Invalid dialog type: '%s'" % dialog_type)

        if text is None:
            text = ""

        assert isinstance(text, str), "`text` parameter must be a string"

        # Script completes itself when the user prompt has been opened.
        # For prompt() dialogs, add a value for the 'default' argument,
        # as some user agents (IE, for example) do not produce consistent
        # values for the default.
        session.execute_async_script("""
            let dialog_type = arguments[0];
            let text = arguments[1];

            setTimeout(function() {
              if (dialog_type == 'prompt') {
                window.dialog_return_value = window[dialog_type](text, '');
              } else {
                window.dialog_return_value = window[dialog_type](text);
              }
            }, 0);
            """, args=(dialog_type, text))

        wait = Poll(
            session,
            timeout=15,
            ignored_exceptions=NoSuchAlertException,
            message="No user prompt with text '{}' detected".format(text))
        # Block until the prompt is actually showing before returning.
        wait.until(lambda s: s.alert.text == text)

    return create_dialog
+
+
@pytest.fixture
def create_frame(session):
    """Create an `iframe` element.

    The element will be inserted into the document of the current browsing
    context. Return a reference to the newly-created element.
    """
    def create_frame():
        # Append an empty iframe and hand back its element reference.
        append = """
            var frame = document.createElement('iframe');
            document.body.appendChild(frame);
            return frame;
        """
        return session.execute_script(append)

    return create_frame
+
+
@pytest.fixture
def stale_element(current_session, iframe, inline):
    """Create a stale element reference

    The given document will be loaded in the top-level or child browsing context.
    Before the requested element is returned it is removed from the document's DOM.
    """
    def stale_element(doc, css_value, as_frame=False):
        # Optionally load the document inside an iframe and switch into it.
        if as_frame:
            current_session.url = inline(iframe(doc))
            frame = current_session.find.css("iframe", all=False)
            current_session.switch_frame(frame)
        else:
            current_session.url = inline(doc)

        element = current_session.find.css(css_value, all=False)

        # Removing the node from the DOM makes the reference stale.
        current_session.execute_script("arguments[0].remove();", args=[element])

        return element

    return stale_element
diff --git a/testing/web-platform/tests/webdriver/tests/support/helpers.py b/testing/web-platform/tests/webdriver/tests/support/helpers.py
new file mode 100644
index 0000000000..e79a31448a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/helpers.py
@@ -0,0 +1,263 @@
+from __future__ import print_function
+
+import collections
+import math
+import sys
+
+import webdriver
+
+from tests.support import defaults
+from tests.support.sync import Poll
+
+
+def ignore_exceptions(f):
+ def inner(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except webdriver.error.WebDriverException as e:
+ print("Ignored exception %s" % e, file=sys.stderr)
+ inner.__name__ = f.__name__
+ return inner
+
+
+def cleanup_session(session):
+ """Clean-up the current session for a clean state."""
+ @ignore_exceptions
+ def _dismiss_user_prompts(session):
+ """Dismiss any open user prompts in windows."""
+ current_window = session.window_handle
+
+ for window in _windows(session):
+ session.window_handle = window
+ try:
+ session.alert.dismiss()
+ except webdriver.NoSuchAlertException:
+ pass
+
+ session.window_handle = current_window
+
+ @ignore_exceptions
+ def _ensure_valid_window(session):
+ """If current window was closed, ensure to have a valid one selected."""
+ try:
+ session.window_handle
+ except webdriver.NoSuchWindowException:
+ session.window_handle = session.handles[0]
+
+ @ignore_exceptions
+ def _restore_timeouts(session):
+ """Restore modified timeouts to their default values."""
+ session.timeouts.implicit = defaults.IMPLICIT_WAIT_TIMEOUT
+ session.timeouts.page_load = defaults.PAGE_LOAD_TIMEOUT
+ session.timeouts.script = defaults.SCRIPT_TIMEOUT
+
+ @ignore_exceptions
+ def _restore_window_state(session):
+ """Reset window to an acceptable size.
+
+ This also includes bringing it out of maximized, minimized,
+ or fullscreened state.
+ """
+ if session.capabilities.get("setWindowRect"):
+ session.window.size = defaults.WINDOW_SIZE
+
+ @ignore_exceptions
+ def _restore_windows(session):
+ """Close superfluous windows opened by the test.
+
+ It will not end the session implicitly by closing the last window.
+ """
+ current_window = session.window_handle
+
+ for window in _windows(session, exclude=[current_window]):
+ session.window_handle = window
+ if len(session.handles) > 1:
+ session.window.close()
+
+ session.window_handle = current_window
+
+ _restore_timeouts(session)
+ _ensure_valid_window(session)
+ _dismiss_user_prompts(session)
+ _restore_windows(session)
+ _restore_window_state(session)
+ _switch_to_top_level_browsing_context(session)
+
+
+@ignore_exceptions
+def _switch_to_top_level_browsing_context(session):
+ """If the current browsing context selected by WebDriver is a
+ `<frame>` or an `<iframe>`, switch it back to the top-level
+ browsing context.
+ """
+ session.switch_frame(None)
+
+
+def _windows(session, exclude=None):
+ """Set of window handles, filtered by an `exclude` list if
+ provided.
+ """
+ if exclude is None:
+ exclude = []
+ wins = [w for w in session.handles if w not in exclude]
+ return set(wins)
+
+
+def clear_all_cookies(session):
+ """Removes all cookies associated with the current active document"""
+ session.transport.send("DELETE", "session/%s/cookie" % session.session_id)
+
+
+def deep_update(source, overrides):
+ """
+ Update a nested dictionary or similar mapping.
+ Modify ``source`` in place.
+ """
+ for key, value in overrides.items():
+ if isinstance(value, collections.abc.Mapping) and value:
+ returned = deep_update(source.get(key, {}), value)
+ source[key] = returned
+ else:
+ source[key] = overrides[key]
+ return source
+
+
+def document_dimensions(session):
+ return tuple(session.execute_script("""
+ let rect = document.documentElement.getBoundingClientRect();
+ return [rect.width, rect.height];
+ """))
+
+
+def center_point(element):
+ """Calculates the in-view center point of a web element."""
+ inner_width, inner_height = element.session.execute_script(
+ "return [window.innerWidth, window.innerHeight]")
+ rect = element.rect
+
+ # calculate the intersection of the rect that is inside the viewport
+ visible = {
+ "left": max(0, min(rect["x"], rect["x"] + rect["width"])),
+ "right": min(inner_width, max(rect["x"], rect["x"] + rect["width"])),
+ "top": max(0, min(rect["y"], rect["y"] + rect["height"])),
+ "bottom": min(inner_height, max(rect["y"], rect["y"] + rect["height"])),
+ }
+
+ # arrive at the centre point of the visible rectangle
+ x = (visible["left"] + visible["right"]) / 2.0
+ y = (visible["top"] + visible["bottom"]) / 2.0
+
+ # convert to CSS pixels, as centre point can be float
+ return (math.floor(x), math.floor(y))
+
+
+def document_hidden(session):
+ """Polls for the document to become hidden."""
+ def hidden(session):
+ return session.execute_script("return document.hidden")
+ return Poll(session, timeout=3, raises=None).until(hidden)
+
+
+def document_location(session):
+ """
+ Unlike ``webdriver.Session#url``, which always returns
+ the top-level browsing context's URL, this returns
+ the current browsing context's active document's URL.
+ """
+ return session.execute_script("return document.location.href")
+
+
+def element_rect(session, element):
+ return session.execute_script("""
+ let element = arguments[0];
+ let rect = element.getBoundingClientRect();
+
+ return {
+ x: rect.left + window.pageXOffset,
+ y: rect.top + window.pageYOffset,
+ width: rect.width,
+ height: rect.height,
+ };
+ """, args=(element,))
+
+
+def is_element_in_viewport(session, element):
+ """Check if element is outside of the viewport"""
+ return session.execute_script("""
+ let el = arguments[0];
+
+ let rect = el.getBoundingClientRect();
+ let viewport = {
+ height: window.innerHeight || document.documentElement.clientHeight,
+ width: window.innerWidth || document.documentElement.clientWidth,
+ };
+
+ return !(rect.right < 0 || rect.bottom < 0 ||
+ rect.left > viewport.width || rect.top > viewport.height)
+ """, args=(element,))
+
+
+def is_fullscreen(session):
+ # At the time of writing, WebKit does not conform to the
+ # Fullscreen API specification.
+ #
+ # Remove the prefixed fallback when
+ # https://bugs.webkit.org/show_bug.cgi?id=158125 is fixed.
+ return session.execute_script("""
+ return !!(window.fullScreen || document.webkitIsFullScreen)
+ """)
+
+
+def document_dimensions(session):
+ return tuple(session.execute_script("""
+ let {devicePixelRatio} = window;
+ let {width, height} = document.documentElement.getBoundingClientRect();
+ return [width * devicePixelRatio, height * devicePixelRatio];
+ """))
+
+
+def screen_size(session):
+ """Returns the available width/height size of the screen."""
+ return tuple(session.execute_script("""
+ return [
+ screen.availWidth,
+ screen.availHeight,
+ ];
+ """))
+
+
+def available_screen_size(session):
+ """
+ Returns the effective available screen width/height size,
+ excluding any fixed window manager elements.
+ """
+ return tuple(session.execute_script("""
+ return [
+ screen.availWidth - screen.availLeft,
+ screen.availHeight - screen.availTop,
+ ];
+ """))
+
+def filter_dict(source, d):
+ """Filter `source` dict to only contain same keys as `d` dict.
+
+ :param source: dictionary to filter.
+ :param d: dictionary whose keys determine the filtering.
+ """
+ return {k: source[k] for k in d.keys()}
+
+
+def wait_for_new_handle(session, handles_before):
+ def find_new_handle(session):
+ new_handles = list(set(session.handles) - set(handles_before))
+ if new_handles and len(new_handles) == 1:
+ return new_handles[0]
+ return None
+
+ wait = Poll(
+ session,
+ timeout=5,
+ message="No new window has been opened")
+
+ return wait.until(find_new_handle)
+
diff --git a/testing/web-platform/tests/webdriver/tests/support/html/deleteframe.html b/testing/web-platform/tests/webdriver/tests/support/html/deleteframe.html
new file mode 100644
index 0000000000..fd757e6db0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/html/deleteframe.html
@@ -0,0 +1,6 @@
+<html>
+<body>
+ <input type="button" id="remove-parent" onclick="parent.remove();" value="Remove parent frame" />
+ <input type="button" id="remove-top" onclick="top.remove();" value="Remove top frame" />
+</body>
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/support/html/frames.html b/testing/web-platform/tests/webdriver/tests/support/html/frames.html
new file mode 100644
index 0000000000..81c6f9b383
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/html/frames.html
@@ -0,0 +1,16 @@
+<html>
+<head>
+ <script type="text/javascript">
+ function remove() {
+ const frame = document.getElementById("sub-frame");
+ const div = document.getElementById("delete");
+ div.removeChild(frame);
+ }
+ </script>
+</head>
+<body>
+ <div id="delete">
+ <iframe src="subframe.html" id="sub-frame"></iframe>
+ </div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/support/html/frames_no_bfcache.html b/testing/web-platform/tests/webdriver/tests/support/html/frames_no_bfcache.html
new file mode 100644
index 0000000000..1972187d21
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/html/frames_no_bfcache.html
@@ -0,0 +1,18 @@
+<html>
+<head>
+ <script type="text/javascript">
+ function remove() {
+ const frame = document.getElementById("sub-frame");
+ const div = document.getElementById("delete");
+ div.removeChild(frame);
+ }
+ </script>
+</head>
+
+<!-- unload handler prevents the page from being added to the bfcache on navigation -->
+<body onunload="">
+ <div id="delete">
+ <iframe src="subframe.html" id="sub-frame"></iframe>
+ </div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/support/html/meta-utf8-after-1024-bytes.html b/testing/web-platform/tests/webdriver/tests/support/html/meta-utf8-after-1024-bytes.html
new file mode 100644
index 0000000000..b5916148b5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/html/meta-utf8-after-1024-bytes.html
@@ -0,0 +1,17 @@
+<!DOCTYPE html>
+<!-- Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla eu iaculis
+lectus. Quisque ullamcorper est at nunc consectetur suscipit. Aliquam imperdiet
+mauris in nulla ornare, id eleifend turpis placerat. Vestibulum lorem libero,
+sollicitudin in orci suscipit, dictum vestibulum nulla. Ut ac est tincidunt,
+cursus leo vel, pellentesque orci. Sed mattis metus augue, ac tincidunt nunc
+lobortis in. Proin eu ipsum auctor lorem sagittis malesuada. Vivamus maximus,
+eros fringilla vulputate tincidunt, tellus tellus viverra augue, sed iaculis
+ipsum lacus quis tellus. Morbi et enim at ante molestie imperdiet et et nulla.
+Aliquam consequat rhoncus magna, vitae sodales urna maximus eget. Mauris eu
+laoreet turpis, eget condimentum lectus. Maecenas vel lorem vel nulla efficitur
+euismod. Sed lobortis enim ac odio bibendum, id vehicula nibh tempus. Phasellus
+sodales, ipsum feugiat aliquam vehicula, diam leo cursus est, nec varius nunc
+felis vitae est. Curabitur ac purus nisl. Mauris condimentum, magna quis
+consectetur biam. -->
+<meta charset="utf-8">
+<div id="body"></div>
diff --git a/testing/web-platform/tests/webdriver/tests/support/html/render.html b/testing/web-platform/tests/webdriver/tests/support/html/render.html
new file mode 100644
index 0000000000..d0408480da
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/html/render.html
@@ -0,0 +1,68 @@
+<!doctype html>
+<canvas></canvas>
+<script>
+async function render(ctx, imgBase64, width, height) {
+ ctx.clearRect(0, 0, width, height);
+ const img = new Image();
+ const loaded = new Promise(resolve => img.addEventListener("load" , resolve, false));
+ img.src = `data:image/png;base64,${imgBase64}`;
+ await loaded;
+ ctx.drawImage(img, 0, 0);
+ return ctx.getImageData(0, 0, width, height);
+}
+
+function compareImgData(img1, img2) {
+ if (img1.width !== img2.width) {
+ throw new Error(`Image widths don't match; got ${img1.width} and ${img2.width}`)
+ }
+ if (img1.height !== img2.height) {
+ throw new Error(`Image heights don't match; got ${img1.height} and ${img2.height}`)
+ }
+
+ const result = {totalPixels: 0, maxDifference: 0};
+
+ const img1Data = img1.data;
+ const img2Data = img2.data;
+
+ let idx = 0;
+ while (idx < img1Data.length) {
+ let maxDifference = 0;
+ for (let channel=0; channel<4; channel++) {
+ const difference = Math.abs(img1Data[idx + channel] - img2Data[idx + channel]);
+ if (difference > maxDifference) {
+ maxDifference = difference
+ }
+ }
+ if (maxDifference > 0) {
+ result.totalPixels += 1;
+ if (maxDifference > result.maxDifference) {
+ result.maxDifference = maxDifference;
+ }
+ }
+ idx += 4;
+ }
+ return result;
+}
+
+/**
+ * Compare two images for equality.
+ *
+ * @param {string} img1 - base64-encoded string of image 1
+ * @param {string} img2 - base64-encoded string of image 2
+ * @param {number} width - Image width in pixels
+ * @param {number} height - Image height in pixels
+ * @returns {Promise<Object>} - A promise that resolves to an object containing `totalPixels`; the
+ * number of pixels different between the images, and `maxDifference`
+ * the maximum difference in any color channel.
+ */
+async function compare(img1, img2, width, height) {
+ const canvas = document.getElementsByTagName("canvas")[0];
+ canvas.width = width;
+ canvas.height = height;
+ const ctx = canvas.getContext("2d");
+
+ let img1Data = await render(ctx, img1, width, height);
+ let img2Data = await render(ctx, img2, width, height);
+ return compareImgData(img1Data, img2Data, width, height);
+}
+</script>
diff --git a/testing/web-platform/tests/webdriver/tests/support/html/subframe.html b/testing/web-platform/tests/webdriver/tests/support/html/subframe.html
new file mode 100644
index 0000000000..2019485529
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/html/subframe.html
@@ -0,0 +1,16 @@
+<html>
+<head>
+ <script type="text/javascript">
+ function remove() {
+ const frame = document.getElementById("delete-frame");
+ const div = document.getElementById("delete");
+ div.removeChild(frame);
+ }
+ </script>
+</head>
+<body>
+ <div id="delete">
+ <iframe src="deleteframe.html" id="delete-frame"></iframe>
+ </div>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webdriver/tests/support/http_handlers/__init__.py b/testing/web-platform/tests/webdriver/tests/support/http_handlers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/http_handlers/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/support/http_handlers/authentication.py b/testing/web-platform/tests/webdriver/tests/support/http_handlers/authentication.py
new file mode 100644
index 0000000000..62067dd166
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/http_handlers/authentication.py
@@ -0,0 +1,25 @@
+from urllib.parse import urlencode
+
+
+def basic_authentication(url, username=None, password=None, protocol="http"):
+ query = {}
+
+ return url("/webdriver/tests/support/http_handlers/authentication.py",
+ query=urlencode(query),
+ protocol=protocol)
+
+
+def main(request, response):
+ user = request.auth.username
+ password = request.auth.password
+
+ if user == b"user" and password == b"password":
+ return b"Authentication done"
+
+ realm = b"test"
+ if b"realm" in request.GET:
+ realm = request.GET.first(b"realm")
+
+ return ((401, b"Unauthorized"),
+ [(b"WWW-Authenticate", b'Basic realm="' + realm + b'"')],
+ b"Please login with credentials 'user' and 'password'")
diff --git a/testing/web-platform/tests/webdriver/tests/support/http_handlers/headers.py b/testing/web-platform/tests/webdriver/tests/support/http_handlers/headers.py
new file mode 100644
index 0000000000..ddae62dc6a
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/http_handlers/headers.py
@@ -0,0 +1,19 @@
+def main(request, response):
+ """Simple handler that returns a response with custom headers.
+
+ The request should define at least one "header" query parameter, with the
+ format {key}:{value}. For instance ?header=foo:bar will create a response
+ with a header with the key "foo" and the value "bar". Additional headers
+ can be set by passing more "header" query parameters.
+ """
+ response.status = 200
+ if b"header" in request.GET:
+ try:
+ headers = request.GET.get_list(b"header")
+ for header in headers:
+ header_parts = header.split(b":")
+ response.headers.set(header_parts[0], header_parts[1])
+ except ValueError:
+ pass
+
+ response.content = "HTTP Response Headers"
diff --git a/testing/web-platform/tests/webdriver/tests/support/http_handlers/redirect.py b/testing/web-platform/tests/webdriver/tests/support/http_handlers/redirect.py
new file mode 100644
index 0000000000..f2fd1ebd51
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/http_handlers/redirect.py
@@ -0,0 +1,19 @@
+def main(request, response):
+ """Simple handler that causes redirection.
+
+ The request should typically have two query parameters:
+ status - The status to use for the redirection. Defaults to 302.
+ location - The resource to redirect to.
+ """
+ status = 302
+ if b"status" in request.GET:
+ try:
+ status = int(request.GET.first(b"status"))
+ except ValueError:
+ pass
+
+ response.status = status
+
+ location = request.GET.first(b"location")
+
+ response.headers.set(b"Location", location)
diff --git a/testing/web-platform/tests/webdriver/tests/support/http_handlers/status.py b/testing/web-platform/tests/webdriver/tests/support/http_handlers/status.py
new file mode 100644
index 0000000000..4dc3de0a88
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/http_handlers/status.py
@@ -0,0 +1,16 @@
+def main(request, response):
+ """Simple handler that returns a response with a custom status.
+
+ The request expects a "status" query parameter, which should be a number.
+ If no status is provided, status 200 will be used.
+ """
+ status = 200
+ if b"status" in request.GET:
+ try:
+ status = int(request.GET.first(b"status"))
+ except ValueError:
+ pass
+
+ response.status = status
+ response.headers.set(b"Content-Type", "text/plain")
+ response.content = "HTTP Response Status"
diff --git a/testing/web-platform/tests/webdriver/tests/support/http_request.py b/testing/web-platform/tests/webdriver/tests/support/http_request.py
new file mode 100644
index 0000000000..242dc9c918
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/http_request.py
@@ -0,0 +1,40 @@
+import contextlib
+import json
+
+from http.client import HTTPConnection
+
+
+class HTTPRequest(object):
+ def __init__(self, host, port):
+ self.host = host
+ self.port = port
+
+ def head(self, path):
+ return self._request("HEAD", path)
+
+ def get(self, path):
+ return self._request("GET", path)
+
+ def post(self, path, body):
+ return self._request("POST", path, body)
+
+ @contextlib.contextmanager
+ def _request(self, method, path, body=None):
+ payload = None
+
+ if body is not None:
+ try:
+ payload = json.dumps(body)
+ except ValueError:
+ raise ValueError("Failed to encode request body as JSON: {}".format(
+ json.dumps(body, indent=2)))
+
+ if isinstance(payload, str):
+ payload = body.encode("utf-8")
+
+ conn = HTTPConnection(self.host, self.port)
+ try:
+ conn.request(method, path, payload)
+ yield conn.getresponse()
+ finally:
+ conn.close()
diff --git a/testing/web-platform/tests/webdriver/tests/support/image.py b/testing/web-platform/tests/webdriver/tests/support/image.py
new file mode 100644
index 0000000000..e4fc88f5b8
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/image.py
@@ -0,0 +1,9 @@
+import struct
+
+from tests.support.asserts import assert_png
+
+
+def png_dimensions(screenshot):
+ image = assert_png(screenshot)
+ width, height = struct.unpack(">LL", image[16:24])
+ return int(width), int(height)
diff --git a/testing/web-platform/tests/webdriver/tests/support/inline.py b/testing/web-platform/tests/webdriver/tests/support/inline.py
new file mode 100644
index 0000000000..494ca74f92
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/inline.py
@@ -0,0 +1,61 @@
+"""Helpers for inlining extracts of documents in tests."""
+
+from urllib.parse import urlencode
+
+
+BOILERPLATES = {
+ "html": "<!doctype html>\n<meta charset={charset}>\n{src}",
+ "xhtml": """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+ <head>
+ <title>XHTML might be the future</title>
+ </head>
+
+ <body>
+ {src}
+ </body>
+</html>""",
+ "xml": """<?xml version="1.0" encoding="{charset}"?>\n{src}""",
+}
+MIME_TYPES = {
+ "html": "text/html",
+ "xhtml": "application/xhtml+xml",
+ "xml": "text/xml",
+}
+
+
+def build_inline(build_url, src, doctype="html", mime=None, charset=None, **kwargs):
+ if mime is None:
+ mime = MIME_TYPES[doctype]
+ if charset is None:
+ charset = "UTF-8"
+ doc = BOILERPLATES[doctype].format(charset=charset, src=src)
+
+ query = {"doc": doc, "mime": mime, "charset": charset}
+ return build_url(
+ "/webdriver/tests/support/inline.py",
+ query=urlencode(query),
+ **kwargs)
+
+
+def main(request, response):
+ doc = request.GET.first(b"doc", None)
+ mime = request.GET.first(b"mime", None)
+ charset = request.GET.first(b"charset", None)
+
+ if doc is None:
+ return 404, [(b"Content-Type",
+ b"text/plain")], b"Missing doc parameter in query"
+
+ content_type = []
+ if mime is not None:
+ content_type.append(mime)
+ if charset is not None:
+ content_type.append(b"charset=%s" % charset)
+
+ headers = {b"X-XSS-Protection": b"0"}
+ if len(content_type) > 0:
+ headers[b"Content-Type"] = b";".join(content_type)
+
+ return 200, headers.items(), doc
diff --git a/testing/web-platform/tests/webdriver/tests/support/merge_dictionaries.py b/testing/web-platform/tests/webdriver/tests/support/merge_dictionaries.py
new file mode 100644
index 0000000000..72f1cab352
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/merge_dictionaries.py
@@ -0,0 +1,42 @@
+def merge_dictionaries(first, second):
+ """Given two dictionaries, create a third that defines all specified
+    key/value pairs. The merge is performed "deeply" on any nested
+ dictionaries. If a value is defined for the same key by both dictionaries,
+ an exception will be raised."""
+ result = dict(first)
+
+ for key, value in second.items():
+ if key in result and result[key] != value:
+ if isinstance(result[key], dict) and isinstance(value, dict):
+ result[key] = merge_dictionaries(result[key], value)
+ elif result[key] != value:
+ raise TypeError("merge_dictionaries: refusing to overwrite " +
+ "attribute: `%s`" % key)
+ else:
+ result[key] = value
+
+ return result
+
+if __name__ == "__main__":
+ assert merge_dictionaries({}, {}) == {}
+ assert merge_dictionaries({}, {"a": 23}) == {"a": 23}
+ assert merge_dictionaries({"a": 23}, {"b": 45}) == {"a": 23, "b": 45}
+
+ e = None
+ try:
+ merge_dictionaries({"a": 23}, {"a": 45})
+ except Exception as _e:
+ e = _e
+ assert isinstance(e, TypeError)
+
+ assert merge_dictionaries({"a": 23}, {"a": 23}) == {"a": 23}
+
+ assert merge_dictionaries({"a": {"b": 23}}, {"a": {"c": 45}}) == {"a": {"b": 23, "c": 45}}
+ assert merge_dictionaries({"a": {"b": 23}}, {"a": {"b": 23}}) == {"a": {"b": 23}}
+
+ e = None
+ try:
+ merge_dictionaries({"a": {"b": 23}}, {"a": {"b": 45}})
+ except Exception as _e:
+ e = _e
+ assert isinstance(e, TypeError)
diff --git a/testing/web-platform/tests/webdriver/tests/support/screenshot.py b/testing/web-platform/tests/webdriver/tests/support/screenshot.py
new file mode 100644
index 0000000000..374e5ed539
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/screenshot.py
@@ -0,0 +1,50 @@
+DEFAULT_CONTENT = "<div id='content'>Lorem ipsum dolor sit amet.</div>"
+
+REFERENCE_CONTENT = f"<div id='outer'>{DEFAULT_CONTENT}</div>"
+REFERENCE_STYLE = """
+ <style>
+ #outer {
+ display: block;
+ margin: 0;
+ border: 0;
+ width: 200px;
+ height: 200px;
+ }
+ #content {
+ display: block;
+ margin: 0;
+ border: 0;
+ width: 100px;
+ height: 100px;
+ background: green;
+ }
+ </style>
+"""
+
+OUTER_IFRAME_STYLE = """
+ <style>
+ iframe {
+ display: block;
+ margin: 0;
+ border: 0;
+ width: 200px;
+ height: 200px;
+ }
+ </style>
+"""
+
+INNER_IFRAME_STYLE = """
+ <style>
+ body {
+ margin: 0;
+ }
+ div {
+ display: block;
+ margin: 0;
+ border: 0;
+ width: 100px;
+ height: 100px;
+ background: green;
+ }
+ </style>
+"""
diff --git a/testing/web-platform/tests/webdriver/tests/support/sync.py b/testing/web-platform/tests/webdriver/tests/support/sync.py
new file mode 100644
index 0000000000..015ebd37a0
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/support/sync.py
@@ -0,0 +1,276 @@
+import asyncio
+import collections
+import sys
+import time
+
+from webdriver import error
+
+
+DEFAULT_TIMEOUT = 5
+DEFAULT_INTERVAL = 0.1
+
+
+class Poll(object):
+ """
+ An explicit conditional utility primitive for polling until a
+ condition evaluates to something truthy.
+
+ A `Poll` instance defines the maximum amount of time to wait
+ for a condition, as well as the frequency with which to check
+ the condition. Furthermore, the user may configure the wait
+ to ignore specific types of exceptions whilst waiting, such as
+ `error.NoSuchElementException` when searching for an element
+ on the page.
+ """
+
+ def __init__(self,
+ session,
+ timeout=DEFAULT_TIMEOUT,
+ interval=DEFAULT_INTERVAL,
+ raises=error.TimeoutException,
+ message=None,
+ ignored_exceptions=None,
+ clock=time):
+ """
+ Configure the poller to have a custom timeout, interval,
+ and list of ignored exceptions. Optionally a different time
+ implementation than the one provided by the standard library
+ (`time`) can also be provided.
+
+ Sample usage::
+
+ # Wait 30 seconds for window to open,
+ # checking for its presence once every 5 seconds.
+ from support.sync import Poll
+ wait = Poll(session, timeout=30, interval=5,
+ ignored_exceptions=error.NoSuchWindowException)
+ window = wait.until(lambda s: s.switch_to_window(42))
+
+ :param session: The input value to be provided to conditions,
+ usually a `webdriver.Session` instance.
+
+ :param timeout: How long to wait for the evaluated condition
+ to become true.
+
+ :param interval: How often the condition should be evaluated.
+ In reality the interval may be greater as the cost of
+ evaluating the condition function. If that is not the case the
+        interval for the next condition function call is shortened to keep
+ the original interval sequence as best as possible.
+
+ :param raises: Optional exception to raise when poll elapses.
+ If not used, an `error.TimeoutException` is raised.
+ If it is `None`, no exception is raised on the poll elapsing.
+
+ :param message: An optional message to include in `raises`'s
+ message if the `until` condition times out.
+
+ :param ignored_exceptions: Ignore specific types of exceptions
+ whilst waiting for the condition. Any exceptions not in this list
+ will be allowed to propagate, terminating the wait.
+
+ :param clock: Allows overriding the use of the runtime's
+ default time library.
+ """
+ self.session = session
+ self.timeout = timeout
+ self.interval = interval
+ self.exc_cls = raises
+ self.exc_msg = message
+ self.clock = clock
+
+ exceptions = []
+ if ignored_exceptions is not None:
+ if isinstance(ignored_exceptions, collections.abc.Iterable):
+ exceptions.extend(iter(ignored_exceptions))
+ else:
+ exceptions.append(ignored_exceptions)
+ self.exceptions = tuple(set(exceptions))
+
+ def until(self, condition):
+ """
+ This will repeatedly evaluate `condition` in anticipation
+ for a truthy return value, or the timeout to expire.
+
+ A condition that returns `None` or does not evaluate to
+ true will fully elapse its timeout before raising, unless
+ the `raises` keyword argument is `None`, in which case the
+ condition's return value is propagated unconditionally.
+
+ If an exception is raised in `condition` and it's not ignored,
+ this function will raise immediately. If the exception is
+ ignored it will be swallowed and polling will resume until
+ either the condition meets the return requirements or the
+ timeout duration is reached.
+
+ :param condition: A callable function whose return value will
+ be returned by this function.
+ """
+ rv = None
+ tb = None
+ start = self.clock.time()
+ end = start + self.timeout
+
+ while not self.clock.time() >= end:
+ try:
+ next = self.clock.time() + self.interval
+ rv = condition(self.session)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except self.exceptions:
+ _, _, tb = sys.exc_info()
+
+ # re-adjust the interval depending on how long
+ # the callback took to evaluate the condition
+ interval_new = max(next - self.clock.time(), 0)
+
+ if not rv:
+ self.clock.sleep(interval_new)
+ continue
+
+ if rv is not None:
+ return rv
+
+ self.clock.sleep(interval_new)
+
+ if self.exc_cls is not None:
+ elapsed = round((self.clock.time() - start), 1)
+ message = "Timed out after {} seconds".format(elapsed)
+ if self.exc_msg is not None:
+ message = "{} with message: {}".format(message, self.exc_msg)
+ raise self.exc_cls(message=message).with_traceback(tb)
+ else:
+ return rv
+
+
+class AsyncPoll(object):
+ """
+ An explicit conditional utility primitive for asynchronously polling
+ until a condition evaluates to something truthy.
+
+ A `Poll` instance defines the maximum amount of time to wait
+ for a condition, as well as the frequency with which to check
+ the condition. Furthermore, the user may configure the wait
+ to ignore specific types of exceptions whilst waiting, such as
+ `error.NoSuchElementException` when searching for an element
+ on the page.
+ """
+
+ def __init__(self,
+ session,
+ timeout=DEFAULT_TIMEOUT,
+ interval=DEFAULT_INTERVAL,
+ raises=error.TimeoutException,
+ message=None,
+ ignored_exceptions=None,
+ clock=None):
+ """
+ Configure the poller to have a custom timeout, interval,
+ and list of ignored exceptions. Optionally a different time
+ implementation than the one provided by the event loop
+ (`asyncio.get_event_loop()`) can also be provided.
+
+ Sample usage::
+
+ # Wait 30 seconds for window to open,
+ # checking for its presence once every 5 seconds.
+ from support.sync import AsyncPoll
+ wait = AsyncPoll(session, timeout=30, interval=5,
+ ignored_exceptions=error.NoSuchWindowException)
+ window = await wait.until(lambda s: s.switch_to_window(42))
+
+ :param session: The input value to be provided to conditions,
+ usually a `webdriver.Session` instance.
+
+ :param timeout: How long to wait for the evaluated condition
+ to become true.
+
+ :param interval: How often the condition should be evaluated.
+ In reality the interval may be greater as the cost of
+ evaluating the condition function. If that is not the case the
+        interval for the next condition function call is shortened to keep
+ the original interval sequence as best as possible.
+
+ :param raises: Optional exception to raise when poll elapses.
+ If not used, an `error.TimeoutException` is raised.
+ If it is `None`, no exception is raised on the poll elapsing.
+
+ :param message: An optional message to include in `raises`'s
+ message if the `until` condition times out.
+
+ :param ignored_exceptions: Ignore specific types of exceptions
+ whilst waiting for the condition. Any exceptions not in this list
+ will be allowed to propagate, terminating the wait.
+
+ :param clock: Allows overriding the use of the asyncio.get_event_loop()
+ default time implementation.
+ """
+ self.session = session
+ self.timeout = timeout
+ self.interval = interval
+ self.exc_cls = raises
+ self.exc_msg = message
+ self.clock = clock if clock is not None else asyncio.get_event_loop()
+
+ exceptions = []
+ if ignored_exceptions is not None:
+ if isinstance(ignored_exceptions, collections.abc.Iterable):
+ exceptions.extend(iter(ignored_exceptions))
+ else:
+ exceptions.append(ignored_exceptions)
+ self.exceptions = tuple(set(exceptions))
+
+ async def until(self, condition):
+ """
+ This will repeatedly evaluate `condition` in anticipation
+ for a truthy return value, or the timeout to expire.
+
+ A condition that returns `None` or does not evaluate to
+ true will fully elapse its timeout before raising, unless
+ the `raises` keyword argument is `None`, in which case the
+ condition's return value is propagated unconditionally.
+
+ If an exception is raised in `condition` and it's not ignored,
+ this function will raise immediately. If the exception is
+ ignored it will be swallowed and polling will resume until
+ either the condition meets the return requirements or the
+ timeout duration is reached.
+
+ :param condition: A callable function whose return value will
+ be returned by this function.
+ """
+ async def poll():
+ result = None
+ traceback = None
+ start = self.clock.time()
+ end = start + self.timeout
+
+ while not self.clock.time() >= end:
+ next = self.clock.time() + self.interval
+
+ try:
+ result = condition(self.session)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except self.exceptions:
+ _, _, traceback = sys.exc_info()
+
+ # re-adjust the interval depending on how long
+ # the callback took to evaluate the condition
+ interval_new = max(next - self.clock.time(), 0)
+
+ if result:
+ return result
+
+ await asyncio.sleep(interval_new)
+
+ if self.exc_cls is not None:
+ elapsed = round((self.clock.time() - start), 1)
+ message = f"Timed out after {elapsed} seconds"
+ if self.exc_msg is not None:
+ message = f"{message} with message: {self.exc_msg}"
+ raise self.exc_cls(message=message).with_traceback(traceback)
+ else:
+ return result
+
+ return await poll()
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_frame/__init__.py b/testing/web-platform/tests/webdriver/tests/switch_to_frame/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_frame/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_frame/cross_origin.py b/testing/web-platform/tests/webdriver/tests/switch_to_frame/cross_origin.py
new file mode 100644
index 0000000000..633eba3f42
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_frame/cross_origin.py
@@ -0,0 +1,63 @@
+from urllib.parse import urlparse
+
+import webdriver.protocol as protocol
+
+from tests.support.asserts import assert_success
+from tests.support.helpers import document_location
+
+
+"""
+Tests that WebDriver can transcend site origins.
+
+Many modern browsers impose strict cross-origin checks,
+and WebDriver should be able to transcend these.
+
+Although an implementation detail, certain browsers
+also enforce process isolation based on site origin.
+This is known to sometimes cause problems for WebDriver implementations.
+"""
+
+
+def switch_to_frame(session, frame):
+ # Send the raw "Switch To Frame" wire command; the protocol
+ # Encoder/Decoder serialize the WebElement reference in the body.
+ return session.transport.send(
+ "POST", "/session/{session_id}/frame".format(**vars(session)),
+ {"id": frame},
+ encoder=protocol.Encoder, decoder=protocol.Decoder,
+ session=session)
+
+
+def test_cross_origin_iframe(session, server_config, inline, iframe):
+ # The top-level page embeds a frame served from the "alt" domain.
+ session.url = inline(iframe("", domain="alt"))
+ frame_element = session.find.css("iframe", all=False)
+
+ response = switch_to_frame(session, frame_element)
+ assert_success(response)
+
+ # After the switch, commands must target the cross-origin document.
+ parse_result = urlparse(document_location(session))
+ assert parse_result.netloc != server_config["browser_host"]
+
+
+def test_nested_cross_origin_iframe(session, server_config, inline, iframe):
+ # frame2 (www subdomain of the alt domain) nests inside frame1 (served
+ # from browser_host), which nests inside an alt-domain top-level page,
+ # so every context boundary crossed below is a cross-origin one.
+ frame2 = iframe("", domain="alt", subdomain="www")
+ frame1 = iframe(frame2)
+ top_doc = inline(frame1, domain="alt")
+
+ session.url = top_doc
+
+ parse_result = urlparse(document_location(session))
+ top_level_host = parse_result.netloc
+ assert not top_level_host.startswith(server_config["browser_host"])
+
+ frame1_element = session.find.css("iframe", all=False)
+ response = switch_to_frame(session, frame1_element)
+ assert_success(response)
+
+ parse_result = urlparse(document_location(session))
+ assert parse_result.netloc.startswith(server_config["browser_host"])
+
+ frame2_el = session.find.css("iframe", all=False)
+ response = switch_to_frame(session, frame2_el)
+ assert_success(response)
+
+ parse_result = urlparse(document_location(session))
+ assert parse_result.netloc == "www.{}".format(top_level_host)
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_frame/switch.py b/testing/web-platform/tests/webdriver/tests/switch_to_frame/switch.py
new file mode 100644
index 0000000000..9ccab2c6c9
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_frame/switch.py
@@ -0,0 +1,116 @@
+import pytest
+
+import webdriver.protocol as protocol
+
+from webdriver import NoSuchElementException
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_same_element, assert_success
+
+
+def switch_to_frame(session, frame):
+ # Raw "Switch To Frame" wire command. The id may be None (top-level),
+ # a number (frame index), or a WebElement reference.
+ return session.transport.send(
+ "POST", "session/{session_id}/frame".format(**vars(session)),
+ {"id": frame},
+ encoder=protocol.Encoder, decoder=protocol.Decoder,
+ session=session)
+
+
+def test_null_parameter_value(session, http):
+ # A null request body must be rejected with "invalid argument".
+ path = "/session/{session_id}/frame".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_null_response_value(session, inline, iframe):
+ session.url = inline(iframe("<p>foo"))
+ frame = session.find.css("iframe", all=False)
+
+ response = switch_to_frame(session, frame)
+ value = assert_success(response)
+ assert value is None
+
+
+@pytest.mark.parametrize("id", [
+ None,
+ 0,
+ {"element-6066-11e4-a52e-4f735466cecf": "foo"},
+])
+def test_no_top_browsing_context(session, url, id):
+ session.window_handle = session.new_window()
+
+ session.url = url("/webdriver/tests/support/html/frames.html")
+
+ subframe = session.find.css("#sub-frame", all=False)
+ session.switch_frame(subframe)
+
+ # Closing the window discards the whole top-level browsing context,
+ # so every id variant must yield "no such window".
+ session.window.close()
+
+ response = switch_to_frame(session, id)
+ assert_error(response, "no such window")
+
+
+@pytest.mark.parametrize("id", [
+ None,
+ 0,
+ {"element-6066-11e4-a52e-4f735466cecf": "foo"},
+])
+def test_no_browsing_context(session, closed_frame, id):
+ response = switch_to_frame(session, id)
+ if id is None:
+ # id None means "switch to the top-level context", which still
+ # exists even though the current frame was discarded.
+ assert_success(response)
+ session.find.css("#delete", all=False)
+ else:
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context_when_already_top_level(session, closed_window):
+ response = switch_to_frame(session, None)
+ assert_error(response, "no such window")
+
+
+@pytest.mark.parametrize("value", ["foo", True, [], {}])
+def test_frame_id_invalid_types(session, value):
+ response = switch_to_frame(session, value)
+ assert_error(response, "invalid argument")
+
+
+def test_frame_id_null(session, inline, iframe):
+ session.url = inline(iframe("{}<div>foo".format(iframe("<p>bar"))))
+
+ frame1 = session.find.css("iframe", all=False)
+ session.switch_frame(frame1)
+ element1 = session.find.css("div", all=False)
+
+ frame2 = session.find.css("iframe", all=False)
+ session.switch_frame(frame2)
+ element2 = session.find.css("p", all=False)
+
+ # Switch to top-level browsing context
+ response = switch_to_frame(session, None)
+ assert_success(response)
+
+ # Elements located inside the now-inactive frames must no longer resolve.
+ with pytest.raises(NoSuchElementException):
+ element2.text
+ with pytest.raises(NoSuchElementException):
+ element1.text
+
+ frame = session.find.css("iframe", all=False)
+ assert_same_element(session, frame, frame1)
+
+
+def test_find_element_while_frame_is_still_loading(session, url):
+ session.timeouts.implicit = 5
+
+ # pipe=trickle(d2) delays delivery of the frame document.
+ frame_url = url("/webdriver/tests/support/html/subframe.html?pipe=trickle(d2)")
+ # NOTE(review): despite its name, page_url holds an HTML snippet, not a URL.
+ page_url = "<html><body><iframe src='{}'></iframe></body></html>".format(frame_url)
+
+ session.execute_script(
+ "document.documentElement.innerHTML = arguments[0];", args=[page_url])
+
+ frame1 = session.find.css("iframe", all=False)
+ session.switch_frame(frame1)
+
+ # Ensure that there is always a valid browsing context, and the element
+ # can be found eventually.
+ session.find.css("#delete", all=False)
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_frame/switch_number.py b/testing/web-platform/tests/webdriver/tests/switch_to_frame/switch_number.py
new file mode 100644
index 0000000000..c8858e77ff
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_frame/switch_number.py
@@ -0,0 +1,50 @@
+import pytest
+
+import webdriver.protocol as protocol
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def switch_to_frame(session, frame):
+ # Raw "Switch To Frame" wire command with a numeric frame index.
+ return session.transport.send(
+ "POST", "session/{session_id}/frame".format(**vars(session)),
+ {"id": frame},
+ encoder=protocol.Encoder, decoder=protocol.Decoder,
+ session=session)
+
+
+# -1 and 2**16 sit just outside the 0..65535 range accepted for a numeric
+# frame id, so they must be rejected before any frame lookup happens.
+@pytest.mark.parametrize("value", [-1, 2**16])
+def test_frame_id_number_out_of_bounds(session, value):
+ response = switch_to_frame(session, value)
+ assert_error(response, "invalid argument")
+
+
+@pytest.mark.parametrize("index", [1, 65535])
+def test_frame_id_number_index_out_of_bounds(session, inline, iframe, index):
+ # Only one frame exists, so any index above 0 refers to no frame.
+ session.url = inline(iframe("<p>foo"))
+
+ response = switch_to_frame(session, index)
+ assert_error(response, "no such frame")
+
+
+@pytest.mark.parametrize("index, value", [[0, "foo"], [1, "bar"]])
+def test_frame_id_number_index(session, inline, iframe, index, value):
+ session.url = inline("{}{}".format(iframe("<p>foo"), iframe("<p>bar")))
+
+ response = switch_to_frame(session, index)
+ assert_success(response)
+
+ element = session.find.css("p", all=False)
+ assert element.text == value
+
+
+def test_frame_id_number_index_nested(session, inline, iframe):
+ session.url = inline(iframe("{}<p>foo".format(iframe("<p>bar"))))
+
+ # Index 0 is relative to the current context, so each iteration
+ # descends one nesting level.
+ expected_text = ["foo", "bar"]
+ for i in range(0, len(expected_text)):
+ response = switch_to_frame(session, 0)
+ assert_success(response)
+
+ element = session.find.css("p", all=False)
+ assert element.text == expected_text[i]
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_frame/switch_webelement.py b/testing/web-platform/tests/webdriver/tests/switch_to_frame/switch_webelement.py
new file mode 100644
index 0000000000..5b57186205
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_frame/switch_webelement.py
@@ -0,0 +1,100 @@
+import pytest
+
+import webdriver.protocol as protocol
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def switch_to_frame(session, frame):
+ # Raw "Switch To Frame" wire command with a WebElement as the frame id.
+ return session.transport.send(
+ "POST", "session/{session_id}/frame".format(**vars(session)),
+ {"id": frame},
+ encoder=protocol.Encoder, decoder=protocol.Decoder,
+ session=session)
+
+
+def frameset(inline, *docs):
+ # Build a <frameset> page embedding each document in its own <frame>.
+ frames = list(map(lambda doc: "<frame src='{}'></frame>".format(inline(doc)), docs))
+ return "<frameset rows='{}'>\n{}</frameset>".format(len(frames) * "*,", "\n".join(frames))
+
+
+def test_frame_id_webelement_no_such_element(session, iframe, inline):
+ session.url = inline(iframe("<p>foo"))
+ frame = session.find.css("iframe", all=False)
+ # Clobber the element reference so it no longer names a known element.
+ frame.id = "bar"
+
+ response = switch_to_frame(session, frame)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_frame_id_webelement_stale_element_reference(session, iframe, stale_element, as_frame):
+ frame = stale_element(iframe("<div>"), "iframe", as_frame=as_frame)
+
+ result = switch_to_frame(session, frame)
+ assert_error(result, "stale element reference")
+
+
+def test_frame_id_webelement_no_frame_element(session, inline):
+ # A valid element that is not a frame/iframe must be rejected.
+ session.url = inline("<p>foo")
+ no_frame = session.find.css("p", all=False)
+
+ response = switch_to_frame(session, no_frame)
+ assert_error(response, "no such frame")
+
+
+@pytest.mark.parametrize("index, value", [[0, "foo"], [1, "bar"]])
+def test_frame_id_webelement_frame(session, inline, index, value):
+ session.url = inline(frameset(inline, "<p>foo", "<p>bar"))
+ frames = session.find.css("frame")
+ assert len(frames) == 2
+
+ response = switch_to_frame(session, frames[index])
+ assert_success(response)
+
+ element = session.find.css("p", all=False)
+ assert element.text == value
+
+
+@pytest.mark.parametrize("index, value", [[0, "foo"], [1, "bar"]])
+def test_frame_id_webelement_iframe(session, inline, iframe, index, value):
+ session.url = inline("{}{}".format(iframe("<p>foo"), iframe("<p>bar")))
+ frames = session.find.css("iframe")
+ assert len(frames) == 2
+
+ response = switch_to_frame(session, frames[index])
+ assert_success(response)
+
+ element = session.find.css("p", all=False)
+ assert element.text == value
+
+
+def test_frame_id_webelement_nested(session, inline, iframe):
+ session.url = inline(iframe("{}<p>foo".format(iframe("<p>bar"))))
+
+ # The iframe lookup is relative to the current context, so each
+ # iteration descends one nesting level.
+ expected_text = ["foo", "bar"]
+ for i in range(0, len(expected_text)):
+ frame_element = session.find.css("iframe", all=False)
+ response = switch_to_frame(session, frame_element)
+ assert_success(response)
+
+ element = session.find.css("p", all=False)
+ assert element.text == expected_text[i]
+
+
+def test_frame_id_webelement_cloned_into_iframe(session, inline, iframe):
+ session.url = inline(iframe("<body><p>hello world</p></body>"))
+
+ # Append a div created by the top window into the iframe's document.
+ session.execute_script("""
+ const iframe = document.getElementsByTagName('iframe')[0];
+ const div = document.createElement('div');
+ div.innerHTML = 'I am a div created in top window and appended into the iframe';
+ iframe.contentWindow.document.body.appendChild(div);
+ """)
+
+ frame = session.find.css("iframe", all=False)
+ response = switch_to_frame(session, frame)
+ assert_success(response)
+
+ element = session.find.css("div", all=False)
+ assert element.text == "I am a div created in top window and appended into the iframe"
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_parent_frame/__init__.py b/testing/web-platform/tests/webdriver/tests/switch_to_parent_frame/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_parent_frame/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_parent_frame/switch.py b/testing/web-platform/tests/webdriver/tests/switch_to_parent_frame/switch.py
new file mode 100644
index 0000000000..9c6db8d2cd
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_parent_frame/switch.py
@@ -0,0 +1,85 @@
+import pytest
+
+from webdriver import NoSuchElementException
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def switch_to_parent_frame(session):
+ # Raw "Switch To Parent Frame" wire command; it takes no request body.
+ return session.transport.send(
+ "POST", "session/{session_id}/frame/parent".format(**vars(session)))
+
+
+def test_null_response_value(session, inline, iframe):
+ session.url = inline(iframe("<p>foo"))
+ frame_element = session.find.css("iframe", all=False)
+ session.switch_frame(frame_element)
+
+ response = switch_to_parent_frame(session)
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session, url):
+ session.window_handle = session.new_window()
+
+ session.url = url("/webdriver/tests/support/html/frames.html")
+
+ subframe = session.find.css("#sub-frame", all=False)
+ session.switch_frame(subframe)
+
+ # Closing the window discards the top-level browsing context.
+ session.window.close()
+
+ response = switch_to_parent_frame(session)
+ assert_error(response, "no such window")
+
+
+def test_no_parent_browsing_context(session, url):
+ session.url = url("/webdriver/tests/support/html/frames.html")
+
+ subframe = session.find.css("#sub-frame", all=False)
+ session.switch_frame(subframe)
+
+ deleteframe = session.find.css("#delete-frame", all=False)
+ session.switch_frame(deleteframe)
+
+ # Clicking #remove-top tears down the ancestor context, so there is no
+ # parent left to switch to.
+ button = session.find.css("#remove-top", all=False)
+ button.click()
+
+ response = switch_to_parent_frame(session)
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame):
+ # Even with the current frame discarded, switching to the parent
+ # succeeds and lands in a context where #delete is findable.
+ response = switch_to_parent_frame(session)
+ assert_success(response)
+
+ session.find.css("#delete", all=False)
+
+
+def test_no_browsing_context_when_already_top_level(session, closed_window):
+ response = switch_to_parent_frame(session)
+ assert_error(response, "no such window")
+
+
+def test_switch_from_iframe(session, inline, iframe):
+ session.url = inline(iframe("<p>foo"))
+ frame_element = session.find.css("iframe", all=False)
+ session.switch_frame(frame_element)
+ element = session.find.css("p", all=False)
+
+ result = switch_to_parent_frame(session)
+ assert_success(result)
+
+ # The element from the child frame must no longer resolve.
+ with pytest.raises(NoSuchElementException):
+ element.text
+
+
+def test_switch_from_top_level(session, inline):
+ # At the top level the command succeeds and the context is unchanged.
+ session.url = inline("<p>foo")
+ element = session.find.css("p", all=False)
+
+ result = switch_to_parent_frame(session)
+ assert_success(result)
+
+ assert element.text == "foo"
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_window/__init__.py b/testing/web-platform/tests/webdriver/tests/switch_to_window/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_window/__init__.py
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_window/alerts.py b/testing/web-platform/tests/webdriver/tests/switch_to_window/alerts.py
new file mode 100644
index 0000000000..2fc390e864
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_window/alerts.py
@@ -0,0 +1,33 @@
+import pytest
+
+from webdriver import error
+
+from tests.support.asserts import assert_success
+
+
+def switch_to_window(session, handle):
+ # Raw "Switch To Window" wire command.
+ return session.transport.send(
+ "POST", "session/{session_id}/window".format(**vars(session)),
+ {"handle": handle})
+
+
+def test_retain_tab_modal_status(session):
+ handle = session.window_handle
+
+ new_handle = session.new_window()
+ response = switch_to_window(session, new_handle)
+ assert_success(response)
+
+ session.execute_script("window.alert('Hello');")
+ assert session.alert.text == "Hello"
+
+ response = switch_to_window(session, handle)
+ assert_success(response)
+
+ # The attribute access itself is expected to raise, because the alert
+ # belongs to the other window; the == comparison result is never used.
+ with pytest.raises(error.NoSuchAlertException):
+ session.alert.text == "Hello"
+
+ response = switch_to_window(session, new_handle)
+ assert_success(response)
+
+ # Switching back, the alert must still be present.
+ assert session.alert.text == "Hello"
diff --git a/testing/web-platform/tests/webdriver/tests/switch_to_window/switch.py b/testing/web-platform/tests/webdriver/tests/switch_to_window/switch.py
new file mode 100644
index 0000000000..28d432a8b5
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/switch_to_window/switch.py
@@ -0,0 +1,100 @@
+import pytest
+
+from webdriver.error import NoSuchElementException, NoSuchAlertException
+from webdriver.transport import Response
+
+from tests.support.asserts import assert_error, assert_success
+
+
+def switch_to_window(session, handle):
+ # Raw "Switch To Window" wire command.
+ return session.transport.send(
+ "POST", "session/{session_id}/window".format(**vars(session)),
+ {"handle": handle})
+
+
+def test_null_parameter_value(session, http):
+ # A null request body must be rejected with "invalid argument".
+ path = "/session/{session_id}/window".format(**vars(session))
+ with http.post(path, None) as response:
+ assert_error(Response.from_http(response), "invalid argument")
+
+
+def test_null_response_value(session):
+ response = switch_to_window(session, session.new_window())
+ value = assert_success(response)
+ assert value is None
+
+
+def test_no_top_browsing_context(session):
+ # Switching must work even when the current top-level context is gone.
+ original_handle = session.window_handle
+ new_handle = session.new_window()
+
+ session.window.close()
+ assert original_handle not in session.handles, "Unable to close window"
+
+ response = switch_to_window(session, new_handle)
+ assert_success(response)
+
+ assert session.window_handle == new_handle
+
+
+def test_no_browsing_context(session, url):
+ # Switching must work even when the current frame context was removed.
+ new_handle = session.new_window()
+
+ session.url = url("/webdriver/tests/support/html/frames.html")
+ subframe = session.find.css("#sub-frame", all=False)
+ session.switch_frame(subframe)
+
+ deleteframe = session.find.css("#delete-frame", all=False)
+ session.switch_frame(deleteframe)
+
+ button = session.find.css("#remove-parent", all=False)
+ button.click()
+
+ response = switch_to_window(session, new_handle)
+ assert_success(response)
+
+ assert session.window_handle == new_handle
+
+
+def test_switch_to_window_sets_top_level_context(session, inline, iframe):
+ session.url = inline(iframe("<p>foo"))
+
+ frame = session.find.css("iframe", all=False)
+ session.switch_frame(frame)
+ session.find.css("p", all=False)
+
+ # Switching to the current window handle resets to the top-level
+ # context, where the iframe element is findable again.
+ response = switch_to_window(session, session.window_handle)
+ assert_success(response)
+
+ session.find.css("iframe", all=False)
+
+
+def test_element_not_found_after_tab_switch(session, inline):
+ session.url = inline("<p id='a'>foo")
+ paragraph = session.find.css("p", all=False)
+
+ session.window_handle = session.new_window(type_hint="tab")
+
+ # The element belongs to the previous tab and must not resolve here.
+ with pytest.raises(NoSuchElementException):
+ paragraph.attribute("id")
+
+
+# NOTE(review): "exising" in the test name is a typo for "existing"; renaming
+# would change the collected test id, so it is only flagged here.
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_finds_exising_user_prompt_after_tab_switch(session, dialog_type):
+ original_handle = session.window_handle
+ new_handle = session.new_window()
+
+ session.execute_script("{}('foo');".format(dialog_type))
+
+ response = switch_to_window(session, new_handle)
+ assert_success(response)
+
+ # The prompt belongs to the original window, not the new one.
+ with pytest.raises(NoSuchAlertException):
+ session.alert.text
+
+ session.window.close()
+
+ response = switch_to_window(session, original_handle)
+ assert_success(response)
+
+ session.alert.accept()
diff --git a/testing/web-platform/tests/webdriver/tests/take_element_screenshot/__init__.py b/testing/web-platform/tests/webdriver/tests/take_element_screenshot/__init__.py
new file mode 100644
index 0000000000..9a82cc48ea
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/take_element_screenshot/__init__.py
@@ -0,0 +1,10 @@
+def element_dimensions(session, element):
+ # Return the element's bounding-rect (width, height) scaled to device
+ # pixels and floored, matching how screenshot PNG dimensions are sized.
+ return tuple(session.execute_script("""
+ const {devicePixelRatio} = window;
+ let {width, height} = arguments[0].getBoundingClientRect();
+
+ return [
+ Math.floor(width * devicePixelRatio),
+ Math.floor(height * devicePixelRatio),
+ ];
+ """, args=(element,)))
diff --git a/testing/web-platform/tests/webdriver/tests/take_element_screenshot/iframe.py b/testing/web-platform/tests/webdriver/tests/take_element_screenshot/iframe.py
new file mode 100644
index 0000000000..e7f1b0c805
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/take_element_screenshot/iframe.py
@@ -0,0 +1,121 @@
+import pytest
+
+from tests.support.asserts import assert_success
+from tests.support.image import png_dimensions
+
+from . import element_dimensions
+
+DEFAULT_CONTENT = "<div id='content'>Lorem ipsum dolor sit amet.</div>"
+
+# The reference styles render a standalone element pixel-identical to the
+# framed content, so the two screenshots can be compared byte-for-byte.
+REFERENCE_CONTENT = "<div id='outer'>{}</div>".format(DEFAULT_CONTENT)
+REFERENCE_STYLE = """
+ <style>
+ #outer {
+ display: block;
+ margin: 0;
+ border: 0;
+ width: 200px;
+ height: 200px;
+ }
+ #content {
+ display: block;
+ margin: 0;
+ border: 0;
+ width: 100px;
+ height: 100px;
+ background: green;
+ }
+ </style>
+"""
+
+OUTER_IFRAME_STYLE = """
+ <style>
+ iframe {
+ display: block;
+ margin: 0;
+ border: 0;
+ width: 200px;
+ height: 200px;
+ }
+ </style>
+"""
+
+INNER_IFRAME_STYLE = """
+ <style>
+ body {
+ margin: 0;
+ }
+ div {
+ display: block;
+ margin: 0;
+ border: 0;
+ width: 100px;
+ height: 100px;
+ background: green;
+ }
+ </style>
+"""
+
+
+def take_element_screenshot(session, element_id):
+ # Raw "Take Element Screenshot" wire command.
+ return session.transport.send(
+ "GET",
+ "session/{session_id}/element/{element_id}/screenshot".format(
+ session_id=session.session_id,
+ element_id=element_id,
+ )
+ )
+
+
+def test_frame_element(session, inline, iframe):
+ # Create a reference element which looks exactly like the frame's content
+ session.url = inline("{0}{1}".format(REFERENCE_STYLE, REFERENCE_CONTENT))
+
+ # Capture the inner content as reference image
+ ref_el = session.find.css("#content", all=False)
+ ref_screenshot = ref_el.screenshot()
+ ref_dimensions = element_dimensions(session, ref_el)
+
+ assert png_dimensions(ref_screenshot) == ref_dimensions
+
+ # Capture the frame's element
+ iframe_content = "{0}{1}".format(INNER_IFRAME_STYLE, DEFAULT_CONTENT)
+ session.url = inline("""{0}{1}""".format(OUTER_IFRAME_STYLE, iframe(iframe_content)))
+
+ frame = session.find.css("iframe", all=False)
+ session.switch_frame(frame)
+ div = session.find.css("div", all=False)
+ div_dimensions = element_dimensions(session, div)
+ assert div_dimensions == ref_dimensions
+
+ response = take_element_screenshot(session, div.id)
+ div_screenshot = assert_success(response)
+
+ # The framed element's screenshot must match the reference exactly.
+ assert png_dimensions(div_screenshot) == ref_dimensions
+ assert div_screenshot == ref_screenshot
+
+
+@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"])
+def test_source_origin(session, inline, iframe, domain):
+ # Create a reference element which looks exactly like the iframe
+ session.url = inline("{0}{1}".format(REFERENCE_STYLE, REFERENCE_CONTENT))
+
+ div = session.find.css("div", all=False)
+ div_dimensions = element_dimensions(session, div)
+
+ response = take_element_screenshot(session, div.id)
+ reference_screenshot = assert_success(response)
+ assert png_dimensions(reference_screenshot) == div_dimensions
+
+ iframe_content = "{0}{1}".format(INNER_IFRAME_STYLE, DEFAULT_CONTENT)
+ session.url = inline("""{0}{1}""".format(
+ OUTER_IFRAME_STYLE, iframe(iframe_content, domain=domain)))
+
+ frame_element = session.find.css("iframe", all=False)
+ frame_dimensions = element_dimensions(session, frame_element)
+
+ response = take_element_screenshot(session, frame_element.id)
+ screenshot = assert_success(response)
+ assert png_dimensions(screenshot) == frame_dimensions
+
+ # The capture must be identical regardless of the frame's origin.
+ assert screenshot == reference_screenshot
diff --git a/testing/web-platform/tests/webdriver/tests/take_element_screenshot/screenshot.py b/testing/web-platform/tests/webdriver/tests/take_element_screenshot/screenshot.py
new file mode 100644
index 0000000000..a1fcfac5f4
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/take_element_screenshot/screenshot.py
@@ -0,0 +1,90 @@
+import pytest
+
+from webdriver import Element
+
+from tests.support.asserts import assert_error, assert_success
+from tests.support.image import png_dimensions
+from . import element_dimensions
+
+
+def take_element_screenshot(session, element_id):
+ # Raw "Take Element Screenshot" wire command.
+ return session.transport.send(
+ "GET",
+ "session/{session_id}/element/{element_id}/screenshot".format(
+ session_id=session.session_id,
+ element_id=element_id,
+ )
+ )
+
+
+def test_no_top_browsing_context(session, closed_window):
+ response = take_element_screenshot(session, "foo")
+ assert_error(response, "no such window")
+
+
+def test_no_browsing_context(session, closed_frame, inline):
+ # With the frame gone, the session operates on the top-level context
+ # and the screenshot still succeeds for an element found there.
+ session.url = inline("<input>")
+ element = session.find.css("input", all=False)
+
+ response = take_element_screenshot(session, element.id)
+ screenshot = assert_success(response)
+
+ assert png_dimensions(screenshot) == element_dimensions(session, element)
+
+
+def test_no_such_element_with_invalid_value(session):
+ # An Element handle with a fabricated reference id is unknown.
+ element = Element("foo", session)
+
+ response = take_element_screenshot(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_window_handle(session, inline, closed):
+ session.url = inline("<div id='parent'><p/>")
+ element = session.find.css("#parent", all=False)
+
+ new_handle = session.new_window()
+
+ if closed:
+ session.window.close()
+
+ session.window_handle = new_handle
+
+ # The element belongs to the other window and must not resolve here.
+ response = take_element_screenshot(session, element.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("closed", [False, True], ids=["open", "closed"])
+def test_no_such_element_from_other_frame(session, url, closed):
+ session.url = url("/webdriver/tests/support/html/subframe.html")
+
+ frame = session.find.css("#delete-frame", all=False)
+ session.switch_frame(frame)
+
+ button = session.find.css("#remove-parent", all=False)
+ if closed:
+ button.click()
+
+ session.switch_frame("parent")
+
+ response = take_element_screenshot(session, button.id)
+ assert_error(response, "no such element")
+
+
+@pytest.mark.parametrize("as_frame", [False, True], ids=["top_context", "child_context"])
+def test_stale_element_reference(session, stale_element, as_frame):
+ element = stale_element("<input>", "input", as_frame=as_frame)
+
+ result = take_element_screenshot(session, element.id)
+ assert_error(result, "stale element reference")
+
+
+def test_format_and_dimensions(session, inline):
+ session.url = inline("<input>")
+ element = session.find.css("input", all=False)
+
+ response = take_element_screenshot(session, element.id)
+ screenshot = assert_success(response)
+
+ # The PNG must be sized to the element's device-pixel bounding rect.
+ assert png_dimensions(screenshot) == element_dimensions(session, element)
diff --git a/testing/web-platform/tests/webdriver/tests/take_element_screenshot/user_prompts.py b/testing/web-platform/tests/webdriver/tests/take_element_screenshot/user_prompts.py
new file mode 100644
index 0000000000..39fefe9325
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/take_element_screenshot/user_prompts.py
@@ -0,0 +1,121 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_png, assert_success
+
+
+def take_element_screenshot(session, element_id):
+ # Raw "Take Element Screenshot" wire command.
+ return session.transport.send(
+ "GET",
+ "session/{session_id}/element/{element_id}/screenshot".format(
+ session_id=session.session_id,
+ element_id=element_id,
+ )
+ )
+
+
+@pytest.fixture
+def check_user_prompt_closed_without_exception(session, create_dialog, inline):
+ # Factory: the prompt is auto-handled, the command succeeds, and the
+ # response carries a PNG screenshot.
+ def check_user_prompt_closed_without_exception(dialog_type, retval):
+ session.url = inline("<input/>")
+ element = session.find.css("input", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = take_element_screenshot(session, element.id)
+ value = assert_success(response)
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ assert_png(value)
+
+ return check_user_prompt_closed_without_exception
+
+
+@pytest.fixture
+def check_user_prompt_closed_with_exception(session, create_dialog, inline):
+ # Factory: the prompt is auto-handled, but the command still errors
+ # with "unexpected alert open".
+ def check_user_prompt_closed_with_exception(dialog_type, retval):
+ session.url = inline("<input/>")
+ element = session.find.css("input", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = take_element_screenshot(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
+
+ return check_user_prompt_closed_with_exception
+
+
+@pytest.fixture
+def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
+ # Factory: the prompt stays open ("ignore" behavior) and the command
+ # errors; the test then dismisses the prompt itself.
+ def check_user_prompt_not_closed_but_exception(dialog_type):
+ session.url = inline("<input/>")
+ element = session.find.css("input", all=False)
+
+ create_dialog(dialog_type, text=dialog_type)
+
+ response = take_element_screenshot(session, element.id)
+ assert_error(response, "unexpected alert open")
+
+ assert session.alert.text == dialog_type
+ session.alert.dismiss()
+
+ return check_user_prompt_not_closed_but_exception
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", True),
+ ("prompt", ""),
+])
+def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
+ check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
+@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
+@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
+def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
+ check_user_prompt_not_closed_but_exception(dialog_type)
+
+
+# Default behavior matches "dismiss and notify".
+@pytest.mark.parametrize("dialog_type, retval", [
+ ("alert", None),
+ ("confirm", False),
+ ("prompt", None),
+])
+def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
+ check_user_prompt_closed_with_exception(dialog_type, retval)
diff --git a/testing/web-platform/tests/webdriver/tests/take_screenshot/__init__.py b/testing/web-platform/tests/webdriver/tests/take_screenshot/__init__.py
new file mode 100644
index 0000000000..f3001d946d
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/take_screenshot/__init__.py
@@ -0,0 +1,21 @@
+def element_dimensions(session, element):
+ # Element bounding-rect (width, height) in floored device pixels.
+ return tuple(session.execute_script("""
+ const {devicePixelRatio} = window;
+ let {width, height} = arguments[0].getBoundingClientRect();
+
+ return [
+ Math.floor(width * devicePixelRatio),
+ Math.floor(height * devicePixelRatio),
+ ];
+ """, args=(element,)))
+
+
+def viewport_dimensions(session):
+ # Viewport (width, height) in floored device pixels, for comparison
+ # against full-page screenshot PNG dimensions.
+ return tuple(session.execute_script("""
+ const {devicePixelRatio, innerHeight, innerWidth} = window;
+
+ return [
+ Math.floor(innerWidth * devicePixelRatio),
+ Math.floor(innerHeight * devicePixelRatio)
+ ];
+ """))
diff --git a/testing/web-platform/tests/webdriver/tests/take_screenshot/iframe.py b/testing/web-platform/tests/webdriver/tests/take_screenshot/iframe.py
new file mode 100644
index 0000000000..133692bc7e
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/take_screenshot/iframe.py
@@ -0,0 +1,54 @@
+import pytest
+from tests.support.asserts import assert_success
+from tests.support.image import png_dimensions
+from tests.support.screenshot import (
+ DEFAULT_CONTENT,
+ INNER_IFRAME_STYLE,
+ OUTER_IFRAME_STYLE,
+ REFERENCE_CONTENT,
+ REFERENCE_STYLE,
+)
+
+from . import viewport_dimensions
+
+
def take_screenshot(session):
    """Issue the WebDriver Take Screenshot command for *session*."""
    url = "session/{session_id}/screenshot".format(**vars(session))
    return session.transport.send("GET", url)
+
+
def test_always_captures_top_browsing_context(session, inline, iframe):
    """A screenshot taken inside a frame must match the top-level screenshot."""
    frame_document = "{0}{1}".format(INNER_IFRAME_STYLE, DEFAULT_CONTENT)
    session.url = inline("""{0}{1}""".format(OUTER_IFRAME_STYLE, iframe(frame_document)))

    # Reference capture from the top-level browsing context.
    reference = assert_success(take_screenshot(session))
    assert png_dimensions(reference) == viewport_dimensions(session)

    # Switching into the iframe must not change what gets captured.
    session.switch_frame(session.find.css("iframe", all=False))

    screenshot = assert_success(take_screenshot(session))

    assert png_dimensions(screenshot) == png_dimensions(reference)
    assert screenshot == reference
+
+
@pytest.mark.parametrize("domain", ["", "alt"], ids=["same_origin", "cross_origin"])
def test_source_origin(session, inline, iframe, domain):
    """Frame content is captured regardless of the frame's origin."""
    session.url = inline("{0}{1}".format(REFERENCE_STYLE, REFERENCE_CONTENT))

    # Reference page without any iframe.
    reference = assert_success(take_screenshot(session))
    assert png_dimensions(reference) == viewport_dimensions(session)

    # Same page content, now nested inside a (same- or cross-origin) iframe.
    frame_document = "{0}{1}".format(INNER_IFRAME_STYLE, DEFAULT_CONTENT)
    session.url = inline("""{0}{1}""".format(
        OUTER_IFRAME_STYLE, iframe(frame_document, domain=domain)))

    screenshot = assert_success(take_screenshot(session))
    assert png_dimensions(screenshot) == viewport_dimensions(session)

    assert screenshot == reference
diff --git a/testing/web-platform/tests/webdriver/tests/take_screenshot/screenshot.py b/testing/web-platform/tests/webdriver/tests/take_screenshot/screenshot.py
new file mode 100644
index 0000000000..9e71a633c7
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/take_screenshot/screenshot.py
@@ -0,0 +1,34 @@
+from tests.support.asserts import assert_error, assert_png, assert_success
+from tests.support.image import png_dimensions
+
+from . import viewport_dimensions
+
+
def take_screenshot(session):
    """Issue the WebDriver Take Screenshot command for *session*."""
    endpoint = "session/{session_id}/screenshot".format(**vars(session))
    return session.transport.send("GET", endpoint)
+
+
def test_no_top_browsing_context(session, closed_window):
    """A closed top-level browsing context yields "no such window"."""
    assert_error(take_screenshot(session), "no such window")
+
+
def test_no_browsing_context(session, closed_frame, inline):
    """Take Screenshot operates on the top-level context even after the
    current (frame) browsing context has been closed."""
    session.url = inline("<input>")

    value = assert_success(take_screenshot(session))

    assert_png(value)
    assert png_dimensions(value) == viewport_dimensions(session)
+
+
def test_format_and_dimensions(session, inline):
    """The returned data is a PNG matching the viewport size in device pixels."""
    session.url = inline("<input>")

    value = assert_success(take_screenshot(session))

    assert_png(value)
    assert png_dimensions(value) == viewport_dimensions(session)
diff --git a/testing/web-platform/tests/webdriver/tests/take_screenshot/user_prompts.py b/testing/web-platform/tests/webdriver/tests/take_screenshot/user_prompts.py
new file mode 100644
index 0000000000..7d57f8f271
--- /dev/null
+++ b/testing/web-platform/tests/webdriver/tests/take_screenshot/user_prompts.py
@@ -0,0 +1,113 @@
+# META: timeout=long
+
+import pytest
+
+from tests.support.asserts import assert_dialog_handled, assert_error, assert_png, assert_success
+
+
def take_screenshot(session):
    """Issue the WebDriver Take Screenshot command for *session*."""
    path = "session/{session_id}/screenshot".format(**vars(session))
    return session.transport.send("GET", path)
+
+
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog, inline):
    """Fixture: assert the open prompt was auto-handled with *retval* and the
    screenshot command succeeded, returning a valid PNG."""
    def check(dialog_type, retval):
        session.url = inline("<input/>")
        create_dialog(dialog_type, text=dialog_type)

        value = assert_success(take_screenshot(session))

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

        assert_png(value)

    return check
+
+
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog, inline):
    """Fixture: assert the open prompt was auto-handled with *retval* but the
    screenshot command still failed with "unexpected alert open"."""
    def check(dialog_type, retval):
        session.url = inline("<input/>")
        create_dialog(dialog_type, text=dialog_type)

        assert_error(take_screenshot(session), "unexpected alert open")

        assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)

    return check
+
+
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog, inline):
    """Fixture: assert the prompt stayed open, the screenshot command failed
    with "unexpected alert open", then dismiss the prompt for cleanup."""
    def check(dialog_type):
        session.url = inline("<input/>")
        create_dialog(dialog_type, text=dialog_type)

        assert_error(take_screenshot(session), "unexpected alert open")

        # The prompt must still be present and showing the expected text.
        assert session.alert.text == dialog_type
        session.alert.dismiss()

    return check
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
    # "accept": the prompt is auto-accepted (confirm -> True, prompt -> "")
    # and the screenshot command succeeds.
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", True),
    ("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    # "accept and notify": the prompt is auto-accepted, but the command
    # still fails with an error.
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
    # "dismiss": the prompt is auto-dismissed (confirm -> False,
    # prompt -> None) and the screenshot command succeeds.
    check_user_prompt_closed_without_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
    # "dismiss and notify": the prompt is auto-dismissed, but the command
    # still fails with an error.
    check_user_prompt_closed_with_exception(dialog_type, retval)
+
+
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
    # "ignore": the prompt stays open, but the command still errors out.
    check_user_prompt_not_closed_but_exception(dialog_type)
+
+
@pytest.mark.parametrize("dialog_type, retval", [
    ("alert", None),
    ("confirm", False),
    ("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
    # No unhandledPromptBehavior capability set: the default behaves like
    # "dismiss and notify" (prompt closed, command fails).
    check_user_prompt_closed_with_exception(dialog_type, retval)