diff --git a/__tests__/cardActionMiddleware.js b/__tests__/cardActionMiddleware.js index 65e3c080e5..cd7e6760e8 100644 --- a/__tests__/cardActionMiddleware.js +++ b/__tests__/cardActionMiddleware.js @@ -4,7 +4,7 @@ import { imageSnapshotOptions, timeouts } from './constants.json'; import allOutgoingActivitiesSent from './setup/conditions/allOutgoingActivitiesSent'; import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown.js'; -import suggestedActionsShowed from './setup/conditions/suggestedActionsShowed'; +import suggestedActionsShown from './setup/conditions/suggestedActionsShown'; import uiConnected from './setup/conditions/uiConnected'; // selenium-webdriver API doc: @@ -31,7 +31,7 @@ test('card action "openUrl"', async () => { await driver.wait(uiConnected(), timeouts.directLine); await pageObjects.sendMessageViaSendBox('card-actions', { waitForSend: true }); - await driver.wait(suggestedActionsShowed(), timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); const openUrlButton = await driver.findElement(By.css('[role="form"] ul > li:first-child button')); diff --git a/__tests__/clockSkew.js b/__tests__/clockSkew.js index bf1fc6032b..7bc1b5b0a8 100644 --- a/__tests__/clockSkew.js +++ b/__tests__/clockSkew.js @@ -84,7 +84,7 @@ describe('Clock skew', () => { // Make sure the clock skew is set correctly. // If it is not set, the result could be a false positive. - expect(pageObjects.getStore()).resolves.toHaveProperty('clockSkewAdjustment', 120000); + await expect(pageObjects.getStore()).resolves.toHaveProperty('clockSkewAdjustment', 120000); await pageObjects.sendMessageViaSendBox('echo This outgoing activity should be the last in the list.', { waitForSend: false @@ -94,7 +94,7 @@ describe('Clock skew', () => { const lastActivity = await driver.findElement(By.css('[role="list"] > li:last-child p')); - expect(lastActivity.getText()).resolves.toBe('echo This outgoing activity should be the last in the list.'); + await expect(lastActivity.getText()).resolves.toBe('echo This outgoing activity should be the last in the list.'); // Skip the echoback for the 2nd user-originated activity so we don't apply a server timestamp to it. It will visually appear as "sending". // Even though the 2nd user-originated activity didn't get a server timestamp, the insertion-sort algorithm should put the bot-originated activity below it.
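The `await expect(...)` changes in `clockSkew.js` above matter because Jest's `resolves` matcher itself returns a promise: without `await`, the test can finish before the matcher settles, and a failing assertion is silently dropped. A minimal standalone sketch of the pitfall (plain Jest, no Selenium; the test name is illustrative):

```js
// Minimal sketch: why `expect(...).resolves` must itself be awaited.
test('resolves matcher must be awaited', async () => {
  const value = Promise.resolve(1);

  // Unawaited: this expression evaluates to a pending promise. If the
  // matcher were to fail, the test could still pass before the rejection
  // surfaces (or it surfaces later as an unhandled rejection).
  // expect(value).resolves.toBe(2);

  // Awaited: the test only passes once the matcher has actually settled.
  await expect(value).resolves.toBe(1);
});
```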
diff --git a/__tests__/focus.js b/__tests__/focus.js new file mode 100644 index 0000000000..780c38f738 --- /dev/null +++ b/__tests__/focus.js @@ -0,0 +1,48 @@ +import { Key } from 'selenium-webdriver'; + +import { timeouts } from './constants.json'; +import sendBoxTextBoxFocused from './setup/conditions/sendBoxTextBoxFocused'; +import suggestedActionsShown from './setup/conditions/suggestedActionsShown'; +import uiConnected from './setup/conditions/uiConnected'; + +// selenium-webdriver API doc: +// https://seleniumhq.github.io/selenium/docs/api/javascript/module/selenium-webdriver/index_exports_WebDriver.html + +jest.setTimeout(timeouts.test); + +// Verification of fix of #1971, https://github.com/microsoft/BotFramework-WebChat/issues/1971 +test('should not focus send box after clicking on send button', async () => { + const { driver, pageObjects } = await setupWebDriver(); + + await driver.wait(uiConnected(), timeouts.directLine); + + await pageObjects.typeOnSendBox('echo 123'); + await pageObjects.clickSendButton(); + + await expect(sendBoxTextBoxFocused().fn(driver)).resolves.toBeFalsy(); +}); + +// Verification of fix of #1971, https://github.com/microsoft/BotFramework-WebChat/issues/1971 +test('should not focus send box after clicking on suggested actions', async () => { + const { driver, pageObjects } = await setupWebDriver(); + + await driver.wait(uiConnected(), timeouts.directLine); + await pageObjects.sendMessageViaSendBox('suggested-actions'); + + await driver.wait(suggestedActionsShown(), timeouts.directLine); + + await pageObjects.clickSuggestedActionButton(0); + + await expect(sendBoxTextBoxFocused().fn(driver)).resolves.toBeFalsy(); +}); + +// Verification of fix of #1971, https://github.com/microsoft/BotFramework-WebChat/issues/1971 +test('should focus send box after pressing ENTER to send message', async () => { + const { driver, pageObjects } = await setupWebDriver(); + + await driver.wait(uiConnected(), timeouts.directLine); + + await pageObjects.typeOnSendBox('echo 123', Key.RETURN); + + await expect(sendBoxTextBoxFocused().fn(driver)).resolves.toBeTruthy(); +}); diff --git a/__tests__/inputHint.js b/__tests__/inputHint.js index 4e1e915935..cdb54e89d5 100644 --- a/__tests__/inputHint.js +++ b/__tests__/inputHint.js @@ -1,8 +1,8 @@ import { timeouts } from './constants.json'; -import isRecognizingSpeech from './setup/pageObjects/isRecognizingSpeech'; import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown'; -import speechSynthesisPending from './setup/conditions/speechSynthesisPending'; +import speechRecognitionStartCalled from './setup/conditions/speechRecognitionStartCalled'; +import speechSynthesisUtterancePended from './setup/conditions/speechSynthesisUtterancePended'; import uiConnected from './setup/conditions/uiConnected'; // selenium-webdriver API doc: @@ -25,11 +25,11 @@ describe('input hint', () => { await driver.wait(minNumActivitiesShown(2), timeouts.directLine); - await driver.wait(speechSynthesisPending(), timeouts.ui); + await driver.wait(speechSynthesisUtterancePended(), timeouts.ui); await pageObjects.startSpeechSynthesize(); await pageObjects.endSpeechSynthesize(); - expect(isRecognizingSpeech(driver)).resolves.toBeTruthy(); + await expect(speechRecognitionStartCalled().fn(driver)).resolves.toBeTruthy(); }); test('should not turn on microphone if initiated via typing', async () => { @@ -45,7 +45,7 @@ describe('input hint', () => { await driver.wait(minNumActivitiesShown(2), timeouts.directLine); - 
expect(isRecognizingSpeech(driver)).resolves.toBeFalsy(); + await expect(speechRecognitionStartCalled().fn(driver)).resolves.toBeFalsy(); }); }); @@ -63,11 +63,11 @@ describe('input hint', () => { await driver.wait(minNumActivitiesShown(2), timeouts.directLine); - await driver.wait(speechSynthesisPending(), timeouts.ui); + await driver.wait(speechSynthesisUtterancePended(), timeouts.ui); await pageObjects.startSpeechSynthesize(); await pageObjects.endSpeechSynthesize(); - expect(isRecognizingSpeech(driver)).resolves.toBeFalsy(); + await expect(speechRecognitionStartCalled().fn(driver)).resolves.toBeFalsy(); }); test('should not turn on microphone if initiated via typing', async () => { @@ -83,7 +83,7 @@ describe('input hint', () => { await driver.wait(minNumActivitiesShown(2), timeouts.directLine); - expect(isRecognizingSpeech(driver)).resolves.toBeFalsy(); + await expect(speechRecognitionStartCalled().fn(driver)).resolves.toBeFalsy(); }); }); @@ -101,11 +101,11 @@ describe('input hint', () => { await driver.wait(minNumActivitiesShown(2), timeouts.directLine); - await driver.wait(speechSynthesisPending(), timeouts.ui); + await driver.wait(speechSynthesisUtterancePended(), timeouts.ui); await pageObjects.startSpeechSynthesize(); await pageObjects.endSpeechSynthesize(); - expect(isRecognizingSpeech(driver)).resolves.toBeFalsy(); + await expect(speechRecognitionStartCalled().fn(driver)).resolves.toBeFalsy(); }); test('should turn off microphone if initiated via typing', async () => { @@ -121,7 +121,7 @@ describe('input hint', () => { await driver.wait(minNumActivitiesShown(2), timeouts.directLine); - expect(isRecognizingSpeech(driver)).resolves.toBeFalsy(); + await expect(speechRecognitionStartCalled().fn(driver)).resolves.toBeFalsy(); }); }); @@ -139,11 +139,11 @@ describe('input hint', () => { await driver.wait(minNumActivitiesShown(2), timeouts.directLine); - await driver.wait(speechSynthesisPending(), timeouts.ui); + await driver.wait(speechSynthesisUtterancePended(), timeouts.ui); await pageObjects.startSpeechSynthesize(); await pageObjects.endSpeechSynthesize(); - expect(isRecognizingSpeech(driver)).resolves.toBeFalsy(); + await expect(speechRecognitionStartCalled().fn(driver)).resolves.toBeFalsy(); }); test('should not turn on microphone if initiated via typing', async () => { @@ -159,7 +159,7 @@ describe('input hint', () => { await driver.wait(minNumActivitiesShown(2), timeouts.directLine); - expect(isRecognizingSpeech(driver)).resolves.toBeFalsy(); + await expect(speechRecognitionStartCalled().fn(driver)).resolves.toBeFalsy(); }); }); }); diff --git a/__tests__/scrollToBottom.js b/__tests__/scrollToBottom.js index 436185be9e..9cbfd3b117 100644 --- a/__tests__/scrollToBottom.js +++ b/__tests__/scrollToBottom.js @@ -4,7 +4,7 @@ import { imageSnapshotOptions, timeouts } from './constants.json'; import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown'; import scrollToBottomCompleted from './setup/conditions/scrollToBottomCompleted'; -import suggestedActionsShowed from './setup/conditions/suggestedActionsShowed'; +import suggestedActionsShown from './setup/conditions/suggestedActionsShown'; import uiConnected from './setup/conditions/uiConnected'; // selenium-webdriver API doc: @@ -21,7 +21,7 @@ test('should stick to bottom if submitting an Adaptive Card while suggested acti await driver.wait(minNumActivitiesShown(2), timeouts.directLine); await pageObjects.sendMessageViaSendBox('suggested-actions', { waitForSend: true }); - await driver.wait(suggestedActionsShowed(), 
timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); await driver.wait(scrollToBottomCompleted(), timeouts.scrollToBottom); const submitButton = await driver.findElement(By.css('button.ac-pushButton:nth-of-type(2)')); diff --git a/__tests__/setup/conditions/negate.js b/__tests__/setup/conditions/negate.js new file mode 100644 index 0000000000..f0fb20dcab --- /dev/null +++ b/__tests__/setup/conditions/negate.js @@ -0,0 +1,5 @@ +import { Condition } from 'selenium-webdriver'; + +export default function negateCondition(condition) { + return new Condition(`Negation of ${condition.description()}`, async (...args) => !(await condition.fn(...args))); +} diff --git a/__tests__/setup/conditions/sendBoxTextBoxFocused.js b/__tests__/setup/conditions/sendBoxTextBoxFocused.js new file mode 100644 index 0000000000..6e73172bb0 --- /dev/null +++ b/__tests__/setup/conditions/sendBoxTextBoxFocused.js @@ -0,0 +1,22 @@ +import { By, Condition } from 'selenium-webdriver'; + +import getSendBoxTextBox, { CSS_SELECTOR } from '../elements/getSendBoxTextBox'; + +export default function sendBoxTextBoxFocused() { + return new Condition('Send box text box to be focused', async driver => { + // Make sure the send box text box is visible + await getSendBoxTextBox(driver); + + try { + await driver.findElement(By.css(CSS_SELECTOR + ':focus')); + + return true; + } catch (err) { + if (err.name === 'NoSuchElementError') { + return false; + } + + throw err; + } + }); +} diff --git a/__tests__/setup/conditions/speechRecognitionStartCalled.js b/__tests__/setup/conditions/speechRecognitionStartCalled.js new file mode 100644 index 0000000000..15b8f44c4e --- /dev/null +++ b/__tests__/setup/conditions/speechRecognitionStartCalled.js @@ -0,0 +1,9 @@ +import { Condition } from 'selenium-webdriver'; + +// Checks if Web Chat has called "speechRecognition.start()" and is waiting for a series of speech events.
+ +export default function speechRecognitionStartCalled() { + return new Condition('SpeechRecognition.start to be called', driver => + driver.executeScript(() => window.WebSpeechMock.speechRecognitionStartCalled()) + ); +} diff --git a/__tests__/setup/conditions/speechRecognitionStarted.js b/__tests__/setup/conditions/speechRecognitionStarted.js deleted file mode 100644 index 68790ce606..0000000000 --- a/__tests__/setup/conditions/speechRecognitionStarted.js +++ /dev/null @@ -1,15 +0,0 @@ -import { Condition } from 'selenium-webdriver'; - -import isRecognizingSpeech from '../pageObjects/isRecognizingSpeech'; - -export default function speechRecognitionStarted() { - return new Condition('Speech recognition to start', async driver => await isRecognizingSpeech(driver)); -} - -function negate() { - const condition = speechRecognitionStarted(); - - return new Condition('Speech recognition not started', async driver => !(await condition.fn(driver))); -} - -export { negate }; diff --git a/__tests__/setup/conditions/speechSynthesisPending.js b/__tests__/setup/conditions/speechSynthesisPending.js deleted file mode 100644 index a6a8be4b99..0000000000 --- a/__tests__/setup/conditions/speechSynthesisPending.js +++ /dev/null @@ -1,15 +0,0 @@ -import { Condition } from 'selenium-webdriver'; - -import hasPendingSpeechSynthesisUtterance from '../pageObjects/hasPendingSpeechSynthesisUtterance'; - -export default function speechSynthesisPending() { - return new Condition('Speech synthesis is pending', async driver => await hasPendingSpeechSynthesisUtterance(driver)); -} - -function negate() { - const condition = speechSynthesisPending(); - - return new Condition('Speech synthesis is not pending', async driver => !(await condition.fn(driver))); -} - -export { negate }; diff --git a/__tests__/setup/conditions/speechSynthesisUtterancePended.js b/__tests__/setup/conditions/speechSynthesisUtterancePended.js new file mode 100644 index 0000000000..970e81b3eb --- /dev/null +++ b/__tests__/setup/conditions/speechSynthesisUtterancePended.js @@ -0,0 +1,7 @@ +import { Condition } from 'selenium-webdriver'; + +export default function speechSynthesisUtterancePended() { + return new Condition('Speech synthesis utterance to be pended to synthesize', driver => + driver.executeScript(() => window.WebSpeechMock.speechSynthesisUtterancePended()) + ); +} diff --git a/__tests__/setup/conditions/suggestedActionsShowed.js b/__tests__/setup/conditions/suggestedActionsShown.js similarity index 68% rename from __tests__/setup/conditions/suggestedActionsShowed.js rename to __tests__/setup/conditions/suggestedActionsShown.js index e8d28238b2..7255e12d23 100644 --- a/__tests__/setup/conditions/suggestedActionsShowed.js +++ b/__tests__/setup/conditions/suggestedActionsShown.js @@ -1,5 +1,5 @@ import { By, until } from 'selenium-webdriver'; -export default function suggestedActionsShowed() { +export default function suggestedActionsShown() { return until.elementLocated(By.css('[role="form"] ul')); } diff --git a/__tests__/setup/pageObjects/getActivityElements.js b/__tests__/setup/elements/getActivityElements.js similarity index 100% rename from __tests__/setup/pageObjects/getActivityElements.js rename to __tests__/setup/elements/getActivityElements.js diff --git a/__tests__/setup/pageObjects/getMicrophoneButton.js b/__tests__/setup/elements/getMicrophoneButton.js similarity index 100% rename from __tests__/setup/pageObjects/getMicrophoneButton.js rename to __tests__/setup/elements/getMicrophoneButton.js diff --git 
a/__tests__/setup/elements/getSendBoxTextBox.js b/__tests__/setup/elements/getSendBoxTextBox.js new file mode 100644 index 0000000000..9e7337354e --- /dev/null +++ b/__tests__/setup/elements/getSendBoxTextBox.js @@ -0,0 +1,9 @@ +import { By } from 'selenium-webdriver'; + +const CSS_SELECTOR = '[role="form"] > * > form > input[type="text"]'; + +export default async function getSendBoxTextBox(driver) { + return await driver.findElement(By.css(CSS_SELECTOR)); +} + +export { CSS_SELECTOR }; diff --git a/__tests__/setup/elements/getSendButton.js b/__tests__/setup/elements/getSendButton.js new file mode 100644 index 0000000000..730e5f8e7e --- /dev/null +++ b/__tests__/setup/elements/getSendButton.js @@ -0,0 +1,5 @@ +import { By } from 'selenium-webdriver'; + +export default async function getSendButton(driver) { + return await driver.findElement(By.css('[role="form"] button[title="Send"]')); +} diff --git a/__tests__/setup/elements/getSuggestedActionButtons.js b/__tests__/setup/elements/getSuggestedActionButtons.js new file mode 100644 index 0000000000..43a13a590c --- /dev/null +++ b/__tests__/setup/elements/getSuggestedActionButtons.js @@ -0,0 +1,5 @@ +import { By } from 'selenium-webdriver'; + +export default async function getSuggestedActionButtons(driver) { + return await driver.findElements(By.css('[role="form"] > :nth-child(2) ul > li button')); +} diff --git a/__tests__/setup/pageObjects/getUploadButton.js b/__tests__/setup/elements/getUploadButton.js similarity index 100% rename from __tests__/setup/pageObjects/getUploadButton.js rename to __tests__/setup/elements/getUploadButton.js diff --git a/__tests__/setup/pageObjects/clickMicrophoneButton.js b/__tests__/setup/pageObjects/clickMicrophoneButton.js new file mode 100644 index 0000000000..1e8a5f53d3 --- /dev/null +++ b/__tests__/setup/pageObjects/clickMicrophoneButton.js @@ -0,0 +1,7 @@ +import getMicrophoneButton from '../elements/getMicrophoneButton'; + +export default async function clickMicrophoneButton(driver) { + const microphoneButton = await getMicrophoneButton(driver); + + await microphoneButton.click(); +} diff --git a/__tests__/setup/pageObjects/clickSendButton.js b/__tests__/setup/pageObjects/clickSendButton.js new file mode 100644 index 0000000000..4f809ac4d2 --- /dev/null +++ b/__tests__/setup/pageObjects/clickSendButton.js @@ -0,0 +1,5 @@ +import getSendButton from '../elements/getSendButton'; + +export default async function clickSendButton(driver) { + await (await getSendButton(driver)).click(); +} diff --git a/__tests__/setup/pageObjects/clickSuggestedActionButton.js b/__tests__/setup/pageObjects/clickSuggestedActionButton.js new file mode 100644 index 0000000000..956ae071ab --- /dev/null +++ b/__tests__/setup/pageObjects/clickSuggestedActionButton.js @@ -0,0 +1,7 @@ +import getSuggestedActionButtons from '../elements/getSuggestedActionButtons'; + +export default async function clickSuggestedActionButton(driver, index) { + const suggestedActions = await getSuggestedActionButtons(driver); + + await suggestedActions[index].click(); +} diff --git a/__tests__/setup/pageObjects/getNumActivitiesShown.js b/__tests__/setup/pageObjects/getNumActivitiesShown.js new file mode 100644 index 0000000000..87e79e0f10 --- /dev/null +++ b/__tests__/setup/pageObjects/getNumActivitiesShown.js @@ -0,0 +1,5 @@ +import { By } from 'selenium-webdriver'; + +export default async function getNumActivitiesShown(driver) { + return (await driver.findElements(By.css(`[role="listitem"]`))).length; +} diff --git 
a/__tests__/setup/pageObjects/getSendBoxText.js b/__tests__/setup/pageObjects/getSendBoxText.js new file mode 100644 index 0000000000..c52c448bf7 --- /dev/null +++ b/__tests__/setup/pageObjects/getSendBoxText.js @@ -0,0 +1,7 @@ +import getSendBoxTextBox from '../elements/getSendBoxTextBox'; + +export default async function getSendBoxText(driver) { + const textBox = await getSendBoxTextBox(driver); + + return await textBox.getAttribute('value'); +} diff --git a/__tests__/setup/pageObjects/getSendBoxTextBox.js b/__tests__/setup/pageObjects/getSendBoxTextBox.js deleted file mode 100644 index 8f8861c6dc..0000000000 --- a/__tests__/setup/pageObjects/getSendBoxTextBox.js +++ /dev/null @@ -1,5 +0,0 @@ -import { By } from 'selenium-webdriver'; - -export default async function getSendBoxTextBox(driver) { - return await driver.findElement(By.css('[role="form"] > * > form > input[type="text"]')); -} diff --git a/__tests__/setup/pageObjects/hasPendingSpeechSynthesisUtterance.js b/__tests__/setup/pageObjects/hasPendingSpeechSynthesisUtterance.js deleted file mode 100644 index fe73f452eb..0000000000 --- a/__tests__/setup/pageObjects/hasPendingSpeechSynthesisUtterance.js +++ /dev/null @@ -1,3 +0,0 @@ -export default async function hasPendingSpeechSynthesisUtterance(driver) { - return await driver.executeScript(() => window.WebSpeechMock.hasPendingUtterance()); -} diff --git a/__tests__/setup/pageObjects/index.js b/__tests__/setup/pageObjects/index.js index edbea7ec4b..32fc37a4e2 100644 --- a/__tests__/setup/pageObjects/index.js +++ b/__tests__/setup/pageObjects/index.js @@ -1,19 +1,20 @@ +import clickMicrophoneButton from './clickMicrophoneButton'; +import clickSendButton from './clickSendButton'; +import clickSuggestedActionButton from './clickSuggestedActionButton'; import dispatchAction from './dispatchAction'; import endSpeechSynthesize from './endSpeechSynthesize'; import executePromiseScript from './executePromiseScript'; -import getActivityElements from './getActivityElements'; -import getMicrophoneButton from './getMicrophoneButton'; -import getSendBoxTextBox from './getSendBoxTextBox'; +import getNumActivitiesShown from './getNumActivitiesShown'; +import getSendBoxText from './getSendBoxText'; import getStore from './getStore'; -import getUploadButton from './getUploadButton'; -import hasPendingSpeechSynthesisUtterance from './hasPendingSpeechSynthesisUtterance'; -import isRecognizingSpeech from './isRecognizingSpeech'; +import isDictating from './isDictating'; import pingBot from './pingBot'; import putSpeechRecognitionResult from './putSpeechRecognitionResult'; import sendFile from './sendFile'; import sendMessageViaMicrophone from './sendMessageViaMicrophone'; import sendMessageViaSendBox from './sendMessageViaSendBox'; import startSpeechSynthesize from './startSpeechSynthesize'; +import typeOnSendBox from './typeOnSendBox'; function mapMap(map, mapper) { return Object.keys(map).reduce((final, key) => { @@ -26,22 +27,23 @@ function mapMap(map, mapper) { export default function pageObjects(driver) { return mapMap( { + clickMicrophoneButton, + clickSendButton, + clickSuggestedActionButton, dispatchAction, endSpeechSynthesize, executePromiseScript, - getActivityElements, - getMicrophoneButton, - getSendBoxTextBox, + getNumActivitiesShown, + getSendBoxText, getStore, - getUploadButton, - hasPendingSpeechSynthesisUtterance, - isRecognizingSpeech, + isDictating, pingBot, putSpeechRecognitionResult, sendFile, sendMessageViaMicrophone, sendMessageViaSendBox, - startSpeechSynthesize + 
startSpeechSynthesize, + typeOnSendBox }, fn => fn.bind(null, driver) ); diff --git a/__tests__/setup/pageObjects/isDictating.js b/__tests__/setup/pageObjects/isDictating.js new file mode 100644 index 0000000000..995084132c --- /dev/null +++ b/__tests__/setup/pageObjects/isDictating.js @@ -0,0 +1,10 @@ +import { By } from 'selenium-webdriver'; + +export default async function isDictating(driver) { + const microphoneButtonContainer = await driver.findElement( + By.css('[aria-controls="webchatSendBoxMicrophoneButton"]') + ); + const microphoneButtonClassName = await microphoneButtonContainer.getAttribute('class'); + + return microphoneButtonClassName.split(' ').includes('dictating'); +} diff --git a/__tests__/setup/pageObjects/isRecognizingSpeech.js b/__tests__/setup/pageObjects/isRecognizingSpeech.js deleted file mode 100644 index 99d414f7b3..0000000000 --- a/__tests__/setup/pageObjects/isRecognizingSpeech.js +++ /dev/null @@ -1,3 +0,0 @@ -export default async function isRecognizingSpeech(driver) { - return await driver.executeScript(() => window.WebSpeechMock.isRecognizing()); -} diff --git a/__tests__/setup/pageObjects/sendFile.js b/__tests__/setup/pageObjects/sendFile.js index 7ef1418f41..6379c1bcb5 100644 --- a/__tests__/setup/pageObjects/sendFile.js +++ b/__tests__/setup/pageObjects/sendFile.js @@ -1,8 +1,9 @@ import { join, posix } from 'path'; + import { timeouts } from '../../constants.json'; import allOutgoingActivitiesSent from '../conditions/allOutgoingActivitiesSent'; -import getActivityElements from './getActivityElements'; -import getUploadButton from './getUploadButton'; +import getActivityElements from '../elements/getActivityElements'; +import getUploadButton from '../elements/getUploadButton'; import minNumActivitiesShown from '../conditions/minNumActivitiesShown.js'; function resolveDockerFile(filename) { diff --git a/__tests__/setup/pageObjects/sendMessageViaMicrophone.js b/__tests__/setup/pageObjects/sendMessageViaMicrophone.js index 033119350f..c3f767dd97 100644 --- a/__tests__/setup/pageObjects/sendMessageViaMicrophone.js +++ b/__tests__/setup/pageObjects/sendMessageViaMicrophone.js @@ -1,15 +1,15 @@ import { timeouts } from '../../constants.json'; + import allOutgoingActivitiesSent from '../conditions/allOutgoingActivitiesSent'; -import getMicrophoneButton from './getMicrophoneButton'; +import clickMicrophoneButton from './clickMicrophoneButton'; import putSpeechRecognitionResult from './putSpeechRecognitionResult'; -import speechRecognitionStarted from '../conditions/speechRecognitionStarted'; +import speechRecognitionStartCalled from '../conditions/speechRecognitionStartCalled'; export default async function sendMessageViaMicrophone(driver, text, { waitForSend = true } = {}) { - const microphoneButton = await getMicrophoneButton(driver); + await clickMicrophoneButton(driver); - await microphoneButton.click(); + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); - await driver.wait(speechRecognitionStarted(), timeouts.ui); await putSpeechRecognitionResult(driver, 'recognize', text); waitForSend && (await driver.wait(allOutgoingActivitiesSent(), timeouts.directLine)); diff --git a/__tests__/setup/pageObjects/sendMessageViaSendBox.js b/__tests__/setup/pageObjects/sendMessageViaSendBox.js index 118c05515c..b24f8e49c9 100644 --- a/__tests__/setup/pageObjects/sendMessageViaSendBox.js +++ b/__tests__/setup/pageObjects/sendMessageViaSendBox.js @@ -2,12 +2,10 @@ import { Key } from 'selenium-webdriver'; import { timeouts } from '../../constants.json'; import 
allOutgoingActivitiesSent from '../conditions/allOutgoingActivitiesSent'; -import getSendBoxTextBox from './getSendBoxTextBox'; +import typeOnSendBox from './typeOnSendBox'; export default async function sendMessageViaSendBox(driver, text, { waitForSend = true } = {}) { - const input = await getSendBoxTextBox(driver); - - await input.sendKeys(text, Key.RETURN); + await typeOnSendBox(driver, text, Key.RETURN); waitForSend && (await driver.wait(allOutgoingActivitiesSent(), timeouts.directLine)); } diff --git a/__tests__/setup/pageObjects/typeOnSendBox.js b/__tests__/setup/pageObjects/typeOnSendBox.js new file mode 100644 index 0000000000..44d8fc72c8 --- /dev/null +++ b/__tests__/setup/pageObjects/typeOnSendBox.js @@ -0,0 +1,7 @@ +import getSendBoxTextBox from '../elements/getSendBoxTextBox'; + +export default async function typeOnSendBox(driver, ...args) { + const textBox = await getSendBoxTextBox(driver); + + await textBox.sendKeys(...args); +} diff --git a/__tests__/setup/web/mockWebSpeech.js b/__tests__/setup/web/mockWebSpeech.js index 5c6dd97129..08a7aeebf4 100644 --- a/__tests__/setup/web/mockWebSpeech.js +++ b/__tests__/setup/web/mockWebSpeech.js @@ -24,12 +24,12 @@ function createProducerConsumer() { cancel() { jobs.splice(0); }, - consume(consumer) { - consumers.push(consumer); - jobs.length && consumers.shift()(...jobs.shift()); + consume(fn, context) { + consumers.push({ fn, context }); + jobs.length && consumers.shift().fn(...jobs.shift()); }, hasConsumer() { - return !!consumers.length; + return !!consumers.length && consumers[0].context; }, hasJob() { return !!jobs.length; @@ -39,7 +39,7 @@ function createProducerConsumer() { }, produce(...args) { jobs.push(args); - consumers.length && consumers.shift()(...jobs.shift()); + consumers.length && consumers.shift().fn(...jobs.shift()); } }; } @@ -62,9 +62,13 @@ class SpeechRecognition extends EventTarget { } start() { - speechRecognitionBroker.consume((command, ...args) => { - this[command](...args); - }); + speechRecognitionBroker.consume((scenario, ...args) => { + if (!this[scenario]) { + throw new Error(`Cannot find speech scenario named "${scenario}" in mockWebSpeech.js`); + } else { + this[scenario](...args); + } + }, this); } microphoneMuted() { @@ -150,6 +154,18 @@ class SpeechRecognition extends EventTarget { this.dispatchEvent({ type: 'end' }); } + recognizing(transcript) { + this.abort = this.stop = NULL_FN; + + this.dispatchEvent({ type: 'start' }); + this.dispatchEvent({ type: 'audiostart' }); + this.dispatchEvent({ type: 'soundstart' }); + this.dispatchEvent({ type: 'speechstart' }); + + this.interimResults && + this.dispatchEvent({ type: 'result', results: createSpeechRecognitionResults(false, transcript) }); + } + recognizeButAborted(transcript) { this.abort = () => { this.dispatchEvent({ type: 'speechend' }); @@ -215,8 +231,15 @@ const SPEECH_SYNTHESIS_VOICES = [ default: true, lang: 'en-US', localService: true, - name: 'Mock Voice', - voiceURI: 'mock://web-speech/voice' + name: 'Mock Voice (en-US)', + voiceURI: 'mock://web-speech/voice/en-US' + }, + { + default: false, + lang: 'zh-YUE', + localService: true, + name: 'Mock Voice (zh-YUE)', + voiceURI: 'mock://web-speech/voice/zh-YUE' } ]; @@ -274,14 +297,6 @@ class SpeechSynthesisUtterance extends EventTarget { ); window.WebSpeechMock = { - hasPendingUtterance() { - return speechSynthesisBroker.hasJob(); - }, - - isRecognizing() { - return speechRecognitionBroker.hasConsumer(); - }, - mockEndSynthesize() { return new Promise(resolve => { 
speechSynthesisBroker.consume(utterance => { @@ -312,6 +327,29 @@ window.WebSpeechMock = { return { lang, pitch, rate, text, voice, volume }; }, + speechRecognitionStartCalled() { + const context = speechRecognitionBroker.hasConsumer(); + + if (context) { + const { continuous, grammars, interimResults, lang, maxAlternatives, serviceURI } = context; + + return { + continuous, + grammars, + interimResults, + lang, + maxAlternatives, + serviceURI + }; + } else { + return false; + } + }, + + speechSynthesisUtterancePended() { + return speechSynthesisBroker.hasJob(); + }, + SpeechGrammarList, SpeechRecognition, speechSynthesis: new SpeechSynthesis(), diff --git a/__tests__/speech.js b/__tests__/speech.js deleted file mode 100644 index 0b4a61e94c..0000000000 --- a/__tests__/speech.js +++ /dev/null @@ -1,41 +0,0 @@ -import { timeouts } from './constants.json'; - -import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown'; -import { negate as speechRecognitionNotStarted } from './setup/conditions/speechRecognitionStarted'; -import speechSynthesisPending, { negate as speechSynthesisNotPending } from './setup/conditions/speechSynthesisPending'; - -// selenium-webdriver API doc: -// https://seleniumhq.github.io/selenium/docs/api/javascript/module/selenium-webdriver/index_exports_WebDriver.html - -jest.setTimeout(timeouts.test); - -describe('speech recognition', () => { - test('should not start recognition after typing on keyboard while synthesizing', async () => { - const { driver, pageObjects } = await setupWebDriver({ - props: { - webSpeechPonyfillFactory: () => window.WebSpeechMock - } - }); - - await pageObjects.sendMessageViaMicrophone('Hello, World!'); - - await driver.wait(minNumActivitiesShown(2), timeouts.directLine); - await driver.wait(speechSynthesisPending(), timeouts.ui); - - const utterance = await pageObjects.startSpeechSynthesize(); - - expect(utterance).toHaveProperty( - 'text', - `Unknown command: I don't know Hello, World!. 
You can say \"help\" to learn more.` - ); - - const sendBoxTextBox = await pageObjects.getSendBoxTextBox(); - - await sendBoxTextBox.sendKeys('Aloha!'); - - await driver.wait(speechSynthesisNotPending(), timeouts.ui); - await driver.wait(speechRecognitionNotStarted(), timeouts.ui); - - expect(pageObjects.isRecognizingSpeech()).resolves.toBeFalsy(); - }); -}); diff --git a/__tests__/speech.recognition.js b/__tests__/speech.recognition.js new file mode 100644 index 0000000000..cac47cd6d6 --- /dev/null +++ b/__tests__/speech.recognition.js @@ -0,0 +1,225 @@ +import { timeouts } from './constants.json'; +import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown'; +import negateCondition from './setup/conditions/negate'; +import speechRecognitionStartCalled from './setup/conditions/speechRecognitionStartCalled'; +import speechSynthesisUtterancePended from './setup/conditions/speechSynthesisUtterancePended'; + +// selenium-webdriver API doc: +// https://seleniumhq.github.io/selenium/docs/api/javascript/module/selenium-webdriver/index_exports_WebDriver.html + +jest.setTimeout(timeouts.test); + +describe('speech recognition', () => { + test('should not start recognition after typing on keyboard while synthesizing', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.sendMessageViaMicrophone('hint expecting input'); + + await driver.wait(minNumActivitiesShown(2), timeouts.directLine); + await driver.wait(speechSynthesisUtterancePended(), timeouts.ui); + + await pageObjects.startSpeechSynthesize(); + await pageObjects.typeOnSendBox('Aloha!'); + + await driver.wait(negateCondition(speechSynthesisUtterancePended()), timeouts.ui); + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + }); + + test('should start recognition after clicking on microphone button', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + }); + + test('should stop recognition after clicking on microphone button while recognizing', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + + await pageObjects.putSpeechRecognitionResult('recognizing', 'Hello'); + + await expect(pageObjects.isDictating()).resolves.toBeTruthy(); + + await pageObjects.clickMicrophoneButton(); + + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + await expect(pageObjects.getSendBoxText()).resolves.toBe('Hello'); + }); + + test('should not send anything on muted microphone', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + + await pageObjects.putSpeechRecognitionResult('microphoneMuted'); + + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + await expect(pageObjects.getNumActivitiesShown(0)).resolves.toBe(0); + }); + + test('should not send anything on bird tweet', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + 
webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + + await pageObjects.putSpeechRecognitionResult('birdTweet'); + + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + await expect(pageObjects.getNumActivitiesShown()).resolves.toBe(0); + }); + + test('should not send anything on unrecognizable speech', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + + await pageObjects.putSpeechRecognitionResult('unrecognizableSpeech'); + + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + await expect(pageObjects.getNumActivitiesShown()).resolves.toBe(0); + }); + + test('should not send anything on airplane mode', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + + await pageObjects.putSpeechRecognitionResult('airplaneMode'); + + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + await expect(pageObjects.getNumActivitiesShown()).resolves.toBe(0); + }); + + test('should not send anything when access to microphone is denied', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + + await pageObjects.putSpeechRecognitionResult('accessDenied'); + + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + await expect(pageObjects.getNumActivitiesShown()).resolves.toBe(0); + }); + + test('should not send anything when aborted immediately after audio start', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + + await pageObjects.putSpeechRecognitionResult('abortAfterAudioStart'); + + await expect(pageObjects.isDictating()).resolves.toBeTruthy(); + + await pageObjects.clickMicrophoneButton(); + + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + await expect(pageObjects.getNumActivitiesShown()).resolves.toBe(0); + }); + + test('should not send anything if aborted while recognizing', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + + await pageObjects.putSpeechRecognitionResult('recognizeButAborted', 'Hello'); + + await expect(pageObjects.isDictating()).resolves.toBeTruthy(); + + await pageObjects.clickMicrophoneButton(); + + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + await expect(pageObjects.getNumActivitiesShown()).resolves.toBe(0); + await expect(pageObjects.getSendBoxText()).resolves.toBe('Hello'); + }); + + test('should not send anything if recognition completes but is not confident', async () => { + const { driver, pageObjects } = await setupWebDriver({ + 
props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await driver.wait(speechRecognitionStartCalled(), timeouts.ui); + + await pageObjects.putSpeechRecognitionResult('recognizeButNotConfident', 'Hello'); + + await expect(pageObjects.isDictating()).resolves.toBeFalsy(); + await expect(pageObjects.getNumActivitiesShown()).resolves.toBe(0); + + // The Web Speech API will send a finalized result with an empty string + await expect(pageObjects.getSendBoxText()).resolves.toBe(''); + }); + + test('should set recognition language', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + locale: 'zh-YUE', + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.clickMicrophoneButton(); + + await expect(speechRecognitionStartCalled().fn(driver)).resolves.toHaveProperty('lang', 'zh-YUE'); + }); +}); diff --git a/__tests__/speech.selectVoice.js b/__tests__/speech.selectVoice.js new file mode 100644 index 0000000000..902cbf4326 --- /dev/null +++ b/__tests__/speech.selectVoice.js @@ -0,0 +1,57 @@ +import { timeouts } from './constants.json'; + +import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown'; +import speechSynthesisUtterancePended from './setup/conditions/speechSynthesisUtterancePended'; + +// selenium-webdriver API doc: +// https://seleniumhq.github.io/selenium/docs/api/javascript/module/selenium-webdriver/index_exports_WebDriver.html + +jest.setTimeout(timeouts.test); + +describe('selecting voice', () => { + describe('based on language', () => { + test('of en-US', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + locale: 'en-US', + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.sendMessageViaMicrophone('echo 123'); + + await driver.wait(minNumActivitiesShown(2), timeouts.directLine); + await driver.wait(speechSynthesisUtterancePended(), timeouts.ui); + + await expect(pageObjects.startSpeechSynthesize()).resolves.toHaveProperty('voice', { + default: true, + lang: 'en-US', + localService: true, + name: 'Mock Voice (en-US)', + voiceURI: 'mock://web-speech/voice/en-US' + }); + }); + + test('of zh-YUE', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + locale: 'zh-YUE', + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.sendMessageViaMicrophone('echo 123'); + + await driver.wait(minNumActivitiesShown(2), timeouts.directLine); + await driver.wait(speechSynthesisUtterancePended(), timeouts.ui); + + await expect(pageObjects.startSpeechSynthesize()).resolves.toHaveProperty('voice', { + default: false, + lang: 'zh-YUE', + localService: true, + name: 'Mock Voice (zh-YUE)', + voiceURI: 'mock://web-speech/voice/zh-YUE' + }); + }); + }); +}); diff --git a/__tests__/speech.synthesis.js b/__tests__/speech.synthesis.js new file mode 100644 index 0000000000..a56bc91156 --- /dev/null +++ b/__tests__/speech.synthesis.js @@ -0,0 +1,57 @@ +import { timeouts } from './constants.json'; + +import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown'; +import speechSynthesisUtterancePended from './setup/conditions/speechSynthesisUtterancePended'; + +// selenium-webdriver API doc: +// https://seleniumhq.github.io/selenium/docs/api/javascript/module/selenium-webdriver/index_exports_WebDriver.html + +jest.setTimeout(timeouts.test); + +describe('speech synthesis', () => { + // Verification of fix of #1736, 
https://github.com/microsoft/BotFramework-WebChat/issues/1736 + test('should synthesize two consecutive messages', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.sendMessageViaMicrophone('echo 123'); + + await driver.wait(minNumActivitiesShown(3), timeouts.directLine); + await driver.wait(speechSynthesisUtterancePended(), timeouts.ui); + + await expect(pageObjects.startSpeechSynthesize()).resolves.toHaveProperty( + 'text', + 'Echoing back in a separate activity.' + ); + + await pageObjects.endSpeechSynthesize(); + + await expect(pageObjects.startSpeechSynthesize()).resolves.toHaveProperty('text', '123'); + + await pageObjects.endSpeechSynthesize(); + }); + + // Verification of fix of #2096, https://github.com/microsoft/BotFramework-WebChat/issues/2096 + test('should synthesize speak property of Adaptive Card', async () => { + const { driver, pageObjects } = await setupWebDriver({ + props: { + webSpeechPonyfillFactory: () => window.WebSpeechMock + } + }); + + await pageObjects.sendMessageViaMicrophone('card bingsports'); + + await driver.wait(minNumActivitiesShown(2), timeouts.directLine); + await driver.wait(speechSynthesisUtterancePended(), timeouts.ui); + + await expect(pageObjects.startSpeechSynthesize()).resolves.toHaveProperty( + 'text', + 'Showing bingsports\r\nThe Seattle Seahawks beat the Carolina Panthers 40-7' + ); + + await pageObjects.endSpeechSynthesize(); + }); +}); diff --git a/__tests__/suggestedActions.js b/__tests__/suggestedActions.js index ec7bfea4be..14b1fd5a9e 100644 --- a/__tests__/suggestedActions.js +++ b/__tests__/suggestedActions.js @@ -5,7 +5,7 @@ import { imageSnapshotOptions, timeouts } from './constants.json'; import allImagesLoaded from './setup/conditions/allImagesLoaded'; import allOutgoingActivitiesSent from './setup/conditions/allOutgoingActivitiesSent'; import minNumActivitiesShown from './setup/conditions/minNumActivitiesShown'; -import suggestedActionsShowed from './setup/conditions/suggestedActionsShowed'; +import suggestedActionsShown from './setup/conditions/suggestedActionsShown'; import uiConnected from './setup/conditions/uiConnected'; // selenium-webdriver API doc: @@ -20,7 +20,7 @@ describe('suggested-actions command', () => { await driver.wait(uiConnected(), timeouts.directLine); await pageObjects.sendMessageViaSendBox('suggested-actions', { waitForSend: true }); - await driver.wait(suggestedActionsShowed(), timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); await driver.wait(allImagesLoaded(), 2000); const base64PNG = await driver.takeScreenshot(); @@ -34,7 +34,7 @@ describe('suggested-actions command', () => { await driver.wait(uiConnected(), timeouts.directLine); await pageObjects.sendMessageViaSendBox('suggested-actions', { waitForSend: true }); - await driver.wait(suggestedActionsShowed(), timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); const buttons = await driver.findElements(By.css('button')); @@ -55,7 +55,7 @@ describe('suggested-actions command', () => { await driver.wait(uiConnected(), timeouts.directLine); await pageObjects.sendMessageViaSendBox('suggested-actions', { waitForSend: true }); - await driver.wait(suggestedActionsShowed(), timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); const buttons = await driver.findElements(By.css('button')); @@ -76,7 +76,7 @@ describe('suggested-actions 
command', () => { await driver.wait(uiConnected(), timeouts.directLine); await pageObjects.sendMessageViaSendBox('suggested-actions', { waitForSend: true }); - await driver.wait(suggestedActionsShowed(), timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); const buttons = await driver.findElements(By.css('button')); @@ -97,7 +97,7 @@ describe('suggested-actions command', () => { await driver.wait(uiConnected(), timeouts.directLine); await pageObjects.sendMessageViaSendBox('suggested-actions', { waitForSend: true }); - await driver.wait(suggestedActionsShowed(), timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); const buttons = await driver.findElements(By.css('button')); @@ -118,7 +118,7 @@ describe('suggested-actions command', () => { await driver.wait(uiConnected(), timeouts.directLine); await pageObjects.sendMessageViaSendBox('suggested-actions', { waitForSend: true }); - await driver.wait(suggestedActionsShowed(), timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); const buttons = await driver.findElements(By.css('button')); @@ -152,7 +152,7 @@ describe('suggested-actions command', () => { await driver.wait(uiConnected(), timeouts.directLine); await pageObjects.sendMessageViaSendBox('emptycard', { waitForSend: true }); - await driver.wait(minNumActivitiesShown(1), timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); await driver.wait(allImagesLoaded(), 2000); const base64PNG = await driver.takeScreenshot(); @@ -167,7 +167,7 @@ describe('suggested-actions command', () => { await driver.wait(uiConnected(), timeouts.directLine); await pageObjects.sendMessageViaSendBox('emptycard', { waitForSend: true }); - await driver.wait(minNumActivitiesShown(1), timeouts.directLine); + await driver.wait(suggestedActionsShown(), timeouts.directLine); await driver.wait(allImagesLoaded(), 2000); const base64PNG = await driver.takeScreenshot();
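The `mockWebSpeech.js` changes earlier in this diff hinge on a small producer/consumer broker, reworked here so each consumer carries a `{ fn, context }` pair that the tests can inspect. A reduced standalone sketch of that pattern (the broker mirrors the diff; the usage lines at the bottom are illustrative only):

```js
// Reduced sketch of the producer/consumer broker in mockWebSpeech.js.
// Producers queue jobs and consumers queue { fn, context } entries;
// whichever side arrives second triggers the hand-off immediately.
function createProducerConsumer() {
  const consumers = [];
  const jobs = [];

  return {
    consume(fn, context) {
      consumers.push({ fn, context });
      jobs.length && consumers.shift().fn(...jobs.shift());
    },
    hasConsumer() {
      // Returns the waiting consumer's context (e.g. the SpeechRecognition
      // instance, so tests can read lang/continuous/etc.), or false.
      return !!consumers.length && consumers[0].context;
    },
    hasJob() {
      return !!jobs.length;
    },
    produce(...args) {
      jobs.push(args);
      consumers.length && consumers.shift().fn(...jobs.shift());
    }
  };
}

// Illustrative usage: a test "produces" a speech scenario and the mock
// SpeechRecognition "consumes" it when start() is called, in either order.
const broker = createProducerConsumer();

broker.consume((scenario, transcript) => console.log(scenario, transcript), { lang: 'en-US' });
broker.produce('recognize', 'Hello, World!');
```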