pd: USB Power Delivery State Machine based on Revision 3.0 of the spec.

Adds Power Delivery to the TypeC state machine as detailed in
Revision 3.0, Version 1.2 of the specification.

This CL passes the PD2.0 compliance tests and has been tested on
several devices. Some areas, such as handling of Electronically Marked
Cable information, creation of PIDs, and host commands, will be
addressed in later CLs.

BUG=b:130895206
BRANCH=none
TEST=manual
  This CL, merged with the PD functionality, was run on Port 0 of Hatch
  against the PD2.0 compliance tester. All tests pass except for a few
  physical layer tests. The test report has been added to the bug.

  Atlas was verified to work with Apple, Amazon, StarTech, MKDGO and
  several other generic docks.

  Atlas was verified to work with Google's and Apple's CTVPD.

Signed-off-by: Sam Hurst <shurst@chromium.org>

Change-Id: Ia5e1988b0d81ec4cf9a7175e273197bd5a0865e4
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/ec/+/1737899
Reviewed-by: Jett Rink <jettrink@chromium.org>
Reviewed-by: Denis Brockus <dbrockus@chromium.org>
Tested-by: Jett Rink <jettrink@chromium.org>
Commit-Queue: Denis Brockus <dbrockus@chromium.org>
Author: Sam Hurst, 2019-08-01 09:09:10 -07:00 (committed by Commit Bot)
parent 16ce272e65
commit c32c1ae24c
16 changed files with 7061 additions and 369 deletions


@ -17,6 +17,7 @@
#include "tcpm.h"
#include "timer.h"
#include "util.h"
#include "usb_common.h"
#include "usb_mux.h"
#include "usb_pd.h"
#include "usb_pd_config.h"
@ -287,8 +288,10 @@ static void update_ports(void)
break;
/* Find the 'best' PDO <= voltage */
pdo_index = pd_find_pdo_index(
CHG, pd_src_voltages_mv[i], &pdo);
pdo_index =
pd_find_pdo_index(pd_get_src_cap_cnt(CHG),
pd_get_src_caps(CHG),
pd_src_voltages_mv[i], &pdo);
/* Don't duplicate PDOs */
if (pdo_index == snk_index)
continue;


@ -12,6 +12,7 @@
#include "charge_state.h"
#include "usb_pd.h"
#include "usb_pd_tcpm.h"
#include "util.h"
int usb_get_battery_soc(void)
{
@ -81,3 +82,192 @@ enum pd_cc_polarity_type get_snk_polarity(enum tcpc_cc_voltage_status cc1,
*/
return cc2 > cc1;
}
/*
* Zinger implements a board specific usb policy that does not define
* PD_MAX_VOLTAGE_MV and PD_OPERATING_POWER_MW. And in turn, does not
* use the following functions.
*/
#if defined(PD_MAX_VOLTAGE_MV) && defined(PD_OPERATING_POWER_MW)
int pd_find_pdo_index(uint32_t src_cap_cnt, const uint32_t * const src_caps,
int max_mv, uint32_t *selected_pdo)
{
int i, uw, mv;
int ret = 0;
int cur_uw = 0;
int prefer_cur;
int __attribute__((unused)) cur_mv = 0;
/* max voltage is always limited by this board's max request */
max_mv = MIN(max_mv, PD_MAX_VOLTAGE_MV);
/* Get max power that is under our max voltage input */
for (i = 0; i < src_cap_cnt; i++) {
/* it's an unsupported Augmented PDO (PD3.0) */
if ((src_caps[i] & PDO_TYPE_MASK) == PDO_TYPE_AUGMENTED)
continue;
mv = ((src_caps[i] >> 10) & 0x3FF) * 50;
/* Skip invalid voltage */
if (!mv)
continue;
/* Skip any voltage not supported by this board */
if (!pd_is_valid_input_voltage(mv))
continue;
if ((src_caps[i] & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
uw = 250000 * (src_caps[i] & 0x3FF);
} else {
int ma = (src_caps[i] & 0x3FF) * 10;
ma = MIN(ma, PD_MAX_CURRENT_MA);
uw = ma * mv;
}
if (mv > max_mv)
continue;
uw = MIN(uw, PD_MAX_POWER_MW * 1000);
prefer_cur = 0;
/* Apply special rules in case of 'tie' */
if (IS_ENABLED(PD_PREFER_LOW_VOLTAGE)) {
if (uw == cur_uw && mv < cur_mv)
prefer_cur = 1;
} else if (IS_ENABLED(PD_PREFER_HIGH_VOLTAGE)) {
if (uw == cur_uw && mv > cur_mv)
prefer_cur = 1;
}
/* Prefer higher power, except for tiebreaker */
if (uw > cur_uw || prefer_cur) {
ret = i;
cur_uw = uw;
cur_mv = mv;
}
}
if (selected_pdo)
*selected_pdo = src_caps[ret];
return ret;
}
void pd_extract_pdo_power(uint32_t pdo, uint32_t *ma, uint32_t *mv)
{
int max_ma, uw;
*mv = ((pdo >> 10) & 0x3FF) * 50;
if (*mv == 0) {
*ma = 0;
return;
}
if ((pdo & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
uw = 250000 * (pdo & 0x3FF);
max_ma = 1000 * MIN(1000 * uw, PD_MAX_POWER_MW) / *mv;
} else {
max_ma = 10 * (pdo & 0x3FF);
max_ma = MIN(max_ma, PD_MAX_POWER_MW * 1000 / *mv);
}
*ma = MIN(max_ma, PD_MAX_CURRENT_MA);
}
void pd_build_request(uint32_t src_cap_cnt, const uint32_t * const src_caps,
int32_t vpd_vdo, uint32_t *rdo, uint32_t *ma,
uint32_t *mv, enum pd_request_type req_type,
uint32_t max_request_mv)
{
uint32_t pdo;
int pdo_index, flags = 0;
int uw;
int max_or_min_ma;
int max_or_min_mw;
int max_vbus;
int vpd_vbus_dcr;
int vpd_gnd_dcr;
if (req_type == PD_REQUEST_VSAFE5V) {
/* src cap 0 should be vSafe5V */
pdo_index = 0;
pdo = src_caps[0];
} else {
/* find pdo index for max voltage we can request */
pdo_index = pd_find_pdo_index(src_cap_cnt, src_caps,
max_request_mv, &pdo);
}
pd_extract_pdo_power(pdo, ma, mv);
/*
* Adjust VBUS current if CTVPD device was detected.
*/
if (vpd_vdo > 0) {
max_vbus = VPD_VDO_MAX_VBUS(vpd_vdo);
vpd_vbus_dcr = VPD_VDO_VBUS_IMP(vpd_vdo) << 1;
vpd_gnd_dcr = VPD_VDO_GND_IMP(vpd_vdo);
if (max_vbus > VPD_MAX_VBUS_50V)
max_vbus = VPD_MAX_VBUS_20V;
/*
* Valid max_vbus values:
* 20000 mV
* 30000 mV
* 40000 mV
* 50000 mV
*/
max_vbus = 20000 + max_vbus * 10000;
if (*mv > max_vbus)
*mv = max_vbus;
/*
* 5000 mA cable: 150 = 750000 / 5000
* 3000 mA cable: 250 = 750000 / 3000
*/
if (*ma > 3000)
*ma = 750000 / (150 + vpd_vbus_dcr + vpd_gnd_dcr);
else
*ma = 750000 / (250 + vpd_vbus_dcr + vpd_gnd_dcr);
}
uw = *ma * *mv;
/* Mismatch bit set if less power offered than the operating power */
if (uw < (1000 * PD_OPERATING_POWER_MW))
flags |= RDO_CAP_MISMATCH;
#ifdef CONFIG_USB_PD_GIVE_BACK
/* Tell source we are give back capable. */
flags |= RDO_GIVE_BACK;
/*
* BATTERY PDO: Inform the source that the sink will reduce
* power to this minimum level on receipt of a GotoMin Request.
*/
max_or_min_mw = PD_MIN_POWER_MW;
/*
* FIXED or VARIABLE PDO: Inform the source that the sink will
* reduce current to this minimum level on receipt of a GotoMin
* Request.
*/
max_or_min_ma = PD_MIN_CURRENT_MA;
#else
/*
* Can't give back, so set maximum current and power to
* operating level.
*/
max_or_min_ma = *ma;
max_or_min_mw = uw / 1000;
#endif
if ((pdo & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
int mw = uw / 1000;
*rdo = RDO_BATT(pdo_index + 1, mw, max_or_min_mw, flags);
} else {
*rdo = RDO_FIXED(pdo_index + 1, *ma, max_or_min_ma, flags);
}
}
#endif
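
These helpers are now port-agnostic: callers pass the source-capability array and count explicitly instead of a port number. A minimal caller sketch, modeled on the call sites updated later in this CL (the board_request_power() wrapper is hypothetical and not part of the change):

/*
 * Hypothetical caller: pick the best PDO and build an RDO using the relocated
 * helpers plus the pd_get_src_caps()/pd_get_src_cap_cnt() accessors added by
 * this CL.
 */
static void board_request_power(int port)
{
	uint32_t pdo, rdo, ma, mv;

	/* Highest-power PDO at or below this board's maximum voltage */
	pd_find_pdo_index(pd_get_src_cap_cnt(port), pd_get_src_caps(port),
			  PD_MAX_VOLTAGE_MV, &pdo);
	pd_extract_pdo_power(pdo, &ma, &mv);

	/* Build the Request Data Object; vpd_vdo is 0 when no CTVPD is present */
	pd_build_request(pd_get_src_cap_cnt(port), pd_get_src_caps(port), 0,
			 &rdo, &ma, &mv, PD_REQUEST_MAX, get_max_request_mv());
}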


@ -22,6 +22,7 @@
#include "timer.h"
#include "util.h"
#include "usb_api.h"
#include "usb_common.h"
#include "usb_pd.h"
#include "usbc_ppc.h"
#include "version.h"
@ -104,144 +105,23 @@ static uint8_t pd_src_cap_cnt[CONFIG_USB_PD_PORT_COUNT];
/* Cap on the max voltage requested as a sink (in millivolts) */
static unsigned max_request_mv = PD_MAX_VOLTAGE_MV; /* no cap */
int pd_find_pdo_index(int port, int max_mv, uint32_t *selected_pdo)
const uint32_t * const pd_get_src_caps(int port)
{
int i, uw, mv;
int ret = 0;
int __attribute__((unused)) cur_mv = 0;
int cur_uw = 0;
int prefer_cur;
const uint32_t *src_caps = pd_src_caps[port];
ASSERT(port < CONFIG_USB_PD_PORT_COUNT);
/* max voltage is always limited by this boards max request */
max_mv = MIN(max_mv, PD_MAX_VOLTAGE_MV);
/* Get max power that is under our max voltage input */
for (i = 0; i < pd_src_cap_cnt[port]; i++) {
/* its an unsupported Augmented PDO (PD3.0) */
if ((src_caps[i] & PDO_TYPE_MASK) == PDO_TYPE_AUGMENTED)
continue;
mv = ((src_caps[i] >> 10) & 0x3FF) * 50;
/* Skip invalid voltage */
if (!mv)
continue;
/* Skip any voltage not supported by this board */
if (!pd_is_valid_input_voltage(mv))
continue;
if ((src_caps[i] & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
uw = 250000 * (src_caps[i] & 0x3FF);
} else {
int ma = (src_caps[i] & 0x3FF) * 10;
ma = MIN(ma, PD_MAX_CURRENT_MA);
uw = ma * mv;
}
if (mv > max_mv)
continue;
uw = MIN(uw, PD_MAX_POWER_MW * 1000);
prefer_cur = 0;
/* Apply special rules in case of 'tie' */
#ifdef PD_PREFER_LOW_VOLTAGE
if (uw == cur_uw && mv < cur_mv)
prefer_cur = 1;
#elif defined(PD_PREFER_HIGH_VOLTAGE)
if (uw == cur_uw && mv > cur_mv)
prefer_cur = 1;
#endif
/* Prefer higher power, except for tiebreaker */
if (uw > cur_uw || prefer_cur) {
ret = i;
cur_uw = uw;
cur_mv = mv;
}
}
if (selected_pdo)
*selected_pdo = src_caps[ret];
return ret;
return pd_src_caps[port];
}
void pd_extract_pdo_power(uint32_t pdo, uint32_t *ma, uint32_t *mv)
uint8_t pd_get_src_cap_cnt(int port)
{
int max_ma, uw;
ASSERT(port < CONFIG_USB_PD_PORT_COUNT);
*mv = ((pdo >> 10) & 0x3FF) * 50;
if (*mv == 0) {
CPRINTF("ERR:PDO mv=0\n");
*ma = 0;
return;
}
if ((pdo & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
uw = 250000 * (pdo & 0x3FF);
max_ma = 1000 * MIN(1000 * uw, PD_MAX_POWER_MW) / *mv;
} else {
max_ma = 10 * (pdo & 0x3FF);
max_ma = MIN(max_ma, PD_MAX_POWER_MW * 1000 / *mv);
}
*ma = MIN(max_ma, PD_MAX_CURRENT_MA);
return pd_src_cap_cnt[port];
}
void pd_build_request(int port, uint32_t *rdo, uint32_t *ma, uint32_t *mv,
enum pd_request_type req_type)
uint32_t get_max_request_mv(void)
{
uint32_t pdo;
int pdo_index, flags = 0;
int uw;
int max_or_min_ma;
int max_or_min_mw;
if (req_type == PD_REQUEST_VSAFE5V) {
/* src cap 0 should be vSafe5V */
pdo_index = 0;
pdo = pd_src_caps[port][0];
} else {
/* find pdo index for max voltage we can request */
pdo_index = pd_find_pdo_index(port, max_request_mv, &pdo);
}
pd_extract_pdo_power(pdo, ma, mv);
uw = *ma * *mv;
/* Mismatch bit set if less power offered than the operating power */
if (uw < (1000 * PD_OPERATING_POWER_MW))
flags |= RDO_CAP_MISMATCH;
#ifdef CONFIG_USB_PD_GIVE_BACK
/* Tell source we are give back capable. */
flags |= RDO_GIVE_BACK;
/*
* BATTERY PDO: Inform the source that the sink will reduce
* power to this minimum level on receipt of a GotoMin Request.
*/
max_or_min_mw = PD_MIN_POWER_MW;
/*
* FIXED or VARIABLE PDO: Inform the source that the sink will reduce
* current to this minimum level on receipt of a GotoMin Request.
*/
max_or_min_ma = PD_MIN_CURRENT_MA;
#else
/*
* Can't give back, so set maximum current and power to operating
* level.
*/
max_or_min_ma = *ma;
max_or_min_mw = uw / 1000;
#endif
if ((pdo & PDO_TYPE_MASK) == PDO_TYPE_BATTERY) {
int mw = uw / 1000;
*rdo = RDO_BATT(pdo_index + 1, mw, max_or_min_mw, flags);
} else {
*rdo = RDO_FIXED(pdo_index + 1, *ma, max_or_min_ma, flags);
}
return max_request_mv;
}
void pd_process_source_cap(int port, int cnt, uint32_t *src_caps)
@ -257,7 +137,8 @@ void pd_process_source_cap(int port, int cnt, uint32_t *src_caps)
#ifdef CONFIG_CHARGE_MANAGER
/* Get max power info that we could request */
pd_find_pdo_index(port, PD_MAX_VOLTAGE_MV, &pdo);
pd_find_pdo_index(pd_get_src_cap_cnt(port), pd_get_src_caps(port),
PD_MAX_VOLTAGE_MV, &pdo);
pd_extract_pdo_power(pdo, &ma, &mv);
/* Set max. limit, but apply 500mA ceiling */


@ -1487,9 +1487,11 @@ static int pd_send_request_msg(int port, int always_send_request)
* If this port is not actively charging or we are not allowed to
* request the max voltage, then select vSafe5V
*/
pd_build_request(port, &rdo, &curr_limit, &supply_voltage,
charging && max_request_allowed ?
PD_REQUEST_MAX : PD_REQUEST_VSAFE5V);
pd_build_request(pd_get_src_cap_cnt(port), pd_get_src_caps(port), 0,
&rdo, &curr_limit, &supply_voltage,
charging && max_request_allowed ?
PD_REQUEST_MAX : PD_REQUEST_VSAFE5V,
get_max_request_mv());
if (!always_send_request) {
/* Don't re-request the same voltage */


@ -14,6 +14,7 @@ all-obj-$(CONFIG_USB_PRL_SM)+=$(_usbc_dir)usb_prl_sm.o
ifneq ($(CONFIG_USB_PE_SM),)
all-obj-$(CONFIG_USB_TYPEC_VPD)+=$(_usbc_dir)usb_pe_ctvpd_sm.o
all-obj-$(CONFIG_USB_TYPEC_CTVPD)+=$(_usbc_dir)usb_pe_ctvpd_sm.o
all-obj-$(CONFIG_USB_TYPEC_DRP_ACC_TRYSRC)+=$(_usbc_dir)usb_pe_drp_sm.o
endif
all-obj-$(CONFIG_USB_TYPEC_VPD)+=$(_usbc_dir)usb_tc_vpd_sm.o
all-obj-$(CONFIG_USB_TYPEC_CTVPD)+=$(_usbc_dir)usb_tc_ctvpd_sm.o


@ -75,35 +75,42 @@ void pe_run(int port, int evt, int en)
}
}
void pe_pass_up_message(int port)
void pe_message_received(int port)
{
pe[port].flags |= PE_FLAGS_MSG_RECEIVED;
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
/**
* NOTE:
* The Charge-Through Vconn Powered Device's Policy Engine is very
* simple and no implementation is needed for the following functions
* that might be called by the Protocol Layer.
*/
void pe_hard_reset_sent(int port)
{
/* Do nothing */
/* No implementation needed by this policy engine */
}
void pe_got_hard_reset(int port)
{
/* Do nothing */
/* No implementation needed by this policy engine */
}
void pe_report_error(int port, enum pe_error e)
{
/* Do nothing */
/* No implementation needed by this policy engine */
}
void pe_got_soft_reset(int port)
{
/* Do nothing */
/* No implementation needed by this policy engine */
}
void pe_message_sent(int port)
{
/* Do nothing */
/* No implementation needed by this policy engine */
}
static void pe_request_run(const int port)

common/usbc/usb_pe_drp_sm.c (new file, 4883 lines): diff suppressed because it is too large


@ -32,6 +32,26 @@
#include "vpd_api.h"
#include "version.h"
#define RCH_SET_FLAG(port, flag) atomic_or(&rch[port].flags, (flag))
#define RCH_CLR_FLAG(port, flag) atomic_clear(&rch[port].flags, (flag))
#define RCH_CHK_FLAG(port, flag) (rch[port].flags & (flag))
#define TCH_SET_FLAG(port, flag) atomic_or(&tch[port].flags, (flag))
#define TCH_CLR_FLAG(port, flag) atomic_clear(&tch[port].flags, (flag))
#define TCH_CHK_FLAG(port, flag) (tch[port].flags & (flag))
#define PRL_TX_SET_FLAG(port, flag) atomic_or(&prl_tx[port].flags, (flag))
#define PRL_TX_CLR_FLAG(port, flag) atomic_clear(&prl_tx[port].flags, (flag))
#define PRL_TX_CHK_FLAG(port, flag) (prl_tx[port].flags & (flag))
#define PRL_HR_SET_FLAG(port, flag) atomic_or(&prl_hr[port].flags, (flag))
#define PRL_HR_CLR_FLAG(port, flag) atomic_clear(&prl_hr[port].flags, (flag))
#define PRL_HR_CHK_FLAG(port, flag) (prl_hr[port].flags & (flag))
#define PDMSG_SET_FLAG(port, flag) atomic_or(&pdmsg[port].flags, (flag))
#define PDMSG_CLR_FLAG(port, flag) atomic_clear(&pdmsg[port].flags, (flag))
#define PDMSG_CHK_FLAG(port, flag) (pdmsg[port].flags & (flag))
/* Protocol Layer Flags */
#define PRL_FLAGS_TX_COMPLETE BIT(0)
#define PRL_FLAGS_START_AMS BIT(1)
@ -157,7 +177,7 @@ static struct protocol_hard_reset {
/* Chunking Message Object */
static struct pd_message {
/* message status flags */
uint32_t status_flags;
uint32_t flags;
/* SOP* */
enum tcpm_transmit_type xmit_type;
/* type of message */
@ -166,6 +186,8 @@ static struct pd_message {
uint8_t ext;
/* PD revision */
enum pd_rev_type rev;
/* Cable PD revision */
enum pd_rev_type cable_rev;
/* Number of 32-bit objects in chk_buf */
uint16_t data_objs;
/* temp chunk buffer */
@ -242,10 +264,10 @@ void pd_transmit_complete(int port, int status)
void pd_execute_hard_reset(int port)
{
/* Only allow async. function calls when state machine is running */
if (local_state[port] != SM_RUN)
if (!prl_is_running(port))
return;
prl_hr[port].flags |= PRL_FLAGS_PORT_PARTNER_HARD_RESET;
PRL_HR_SET_FLAG(port, PRL_FLAGS_PORT_PARTNER_HARD_RESET);
set_state_prl_hr(port, PRL_HR_RESET_LAYER);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
@ -253,14 +275,19 @@ void pd_execute_hard_reset(int port)
void prl_execute_hard_reset(int port)
{
/* Only allow async. function calls when state machine is running */
if (local_state[port] != SM_RUN)
if (!prl_is_running(port))
return;
prl_hr[port].flags |= PRL_FLAGS_PE_HARD_RESET;
PRL_HR_SET_FLAG(port, PRL_FLAGS_PE_HARD_RESET);
set_state_prl_hr(port, PRL_HR_RESET_LAYER);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
int prl_is_running(int port)
{
return local_state[port] == SM_RUN;
}
static void prl_init(int port)
{
int i;
@ -273,12 +300,13 @@ static void prl_init(int port)
rch[port].flags = 0;
/*
* Initialize to highest revision supported. If the port partner
* doesn't support this revision, the Protocol Engine will lower
* this value to the revision supported by the port partner.
* Initialize to highest revision supported. If the port or cable
* partner doesn't support this revision, the Protocol Engine will
* lower this value to the revision supported by the partner.
*/
pdmsg[port].cable_rev = PD_REV30;
pdmsg[port].rev = PD_REV30;
pdmsg[port].status_flags = 0;
pdmsg[port].flags = 0;
prl_hr[port].flags = 0;
@ -303,17 +331,17 @@ static void prl_init(int port)
void prl_start_ams(int port)
{
prl_tx[port].flags |= PRL_FLAGS_START_AMS;
PRL_TX_SET_FLAG(port, PRL_FLAGS_START_AMS);
}
void prl_end_ams(int port)
{
prl_tx[port].flags |= PRL_FLAGS_END_AMS;
PRL_TX_SET_FLAG(port, PRL_FLAGS_END_AMS);
}
void prl_hard_reset_complete(int port)
{
prl_hr[port].flags |= PRL_FLAGS_HARD_RESET_COMPLETE;
PRL_HR_SET_FLAG(port, PRL_FLAGS_HARD_RESET_COMPLETE);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
@ -326,7 +354,7 @@ void prl_send_ctrl_msg(int port,
pdmsg[port].ext = 0;
emsg[port].len = 0;
tch[port].flags |= PRL_FLAGS_MSG_XMIT;
TCH_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
@ -338,7 +366,7 @@ void prl_send_data_msg(int port,
pdmsg[port].msg_type = msg;
pdmsg[port].ext = 0;
tch[port].flags |= PRL_FLAGS_MSG_XMIT;
TCH_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
@ -350,10 +378,15 @@ void prl_send_ext_data_msg(int port,
pdmsg[port].msg_type = msg;
pdmsg[port].ext = 1;
tch[port].flags |= PRL_FLAGS_MSG_XMIT;
TCH_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
}
void prl_reset(int port)
{
local_state[port] = SM_INIT;
}
void prl_run(int port, int evt, int en)
{
switch (local_state[port]) {
@ -409,6 +442,16 @@ enum pd_rev_type prl_get_rev(int port)
return pdmsg[port].rev;
}
void prl_set_cable_rev(int port, enum pd_rev_type rev)
{
pdmsg[port].cable_rev = rev;
}
enum pd_rev_type prl_get_cable_rev(int port)
{
return pdmsg[port].cable_rev;
}
/* Common Protocol Layer Message Transmission */
static void prl_tx_phy_layer_reset_entry(const int port)
{
@ -434,8 +477,8 @@ static void prl_tx_wait_for_message_request_entry(const int port)
static void prl_tx_wait_for_message_request_run(const int port)
{
if (prl_tx[port].flags & PRL_FLAGS_MSG_XMIT) {
prl_tx[port].flags &= ~PRL_FLAGS_MSG_XMIT;
if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_MSG_XMIT)) {
PRL_TX_CLR_FLAG(port, PRL_FLAGS_MSG_XMIT);
/*
* Soft Reset Message pending
*/
@ -453,16 +496,15 @@ static void prl_tx_wait_for_message_request_run(const int port)
}
return;
} else if ((pdmsg[port].rev == PD_REV30) &&
(prl_tx[port].flags &
} else if ((pdmsg[port].rev == PD_REV30) && PRL_TX_CHK_FLAG(port,
(PRL_FLAGS_START_AMS | PRL_FLAGS_END_AMS))) {
if (tc_get_power_role(port) == PD_ROLE_SOURCE) {
/*
* Start of AMS notification received from
* Policy Engine
*/
if (prl_tx[port].flags & PRL_FLAGS_START_AMS) {
prl_tx[port].flags &= ~PRL_FLAGS_START_AMS;
if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_START_AMS)) {
PRL_TX_CLR_FLAG(port, PRL_FLAGS_START_AMS);
set_state_prl_tx(port, PRL_TX_SRC_SOURCE_TX);
return;
}
@ -470,8 +512,8 @@ static void prl_tx_wait_for_message_request_run(const int port)
* End of AMS notification received from
* Policy Engine
*/
else if (prl_tx[port].flags & PRL_FLAGS_END_AMS) {
prl_tx[port].flags &= ~PRL_FLAGS_END_AMS;
else if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_END_AMS)) {
PRL_TX_CLR_FLAG(port, PRL_FLAGS_END_AMS);
/* Set Rp = SinkTxOk */
tcpm_select_rp_value(port, SINK_TX_OK);
tcpm_set_cc(port, TYPEC_CC_RP);
@ -479,8 +521,8 @@ static void prl_tx_wait_for_message_request_run(const int port)
prl_tx[port].flags = 0;
}
} else {
if (prl_tx[port].flags & PRL_FLAGS_START_AMS) {
prl_tx[port].flags &= ~PRL_FLAGS_START_AMS;
if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_START_AMS)) {
PRL_TX_CLR_FLAG(port, PRL_FLAGS_START_AMS);
/*
* First Message in AMS notification
* received from Policy Engine.
@ -521,8 +563,8 @@ static void prl_tx_src_source_tx_entry(const int port)
static void prl_tx_src_source_tx_run(const int port)
{
if (prl_tx[port].flags & PRL_FLAGS_MSG_XMIT) {
prl_tx[port].flags &= ~PRL_FLAGS_MSG_XMIT;
if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_MSG_XMIT)) {
PRL_TX_CLR_FLAG(port, PRL_FLAGS_MSG_XMIT);
set_state_prl_tx(port, PRL_TX_SRC_PENDING);
}
@ -533,8 +575,8 @@ static void prl_tx_src_source_tx_run(const int port)
*/
static void prl_tx_snk_start_ams_run(const int port)
{
if (prl_tx[port].flags & PRL_FLAGS_MSG_XMIT) {
prl_tx[port].flags &= ~PRL_FLAGS_MSG_XMIT;
if (PRL_TX_CHK_FLAG(port, PRL_FLAGS_MSG_XMIT)) {
PRL_TX_CLR_FLAG(port, PRL_FLAGS_MSG_XMIT);
set_state_prl_tx(port, PRL_TX_SNK_PENDING);
}
@ -567,12 +609,21 @@ static void prl_tx_construct_message(int port)
tc_get_data_role(port),
prl_tx[port].msg_id_counter[pdmsg[port].xmit_type],
pdmsg[port].data_objs,
pdmsg[port].rev,
(prl_tx[port].sop == TCPC_TX_SOP) ?
pdmsg[port].rev : pdmsg[port].cable_rev,
pdmsg[port].ext);
/* Save SOP* so the correct msg_id_counter can be incremented */
prl_tx[port].sop = pdmsg[port].xmit_type;
/*
* These flags could be set if this function is called before the
* Policy Engine is informed of the previous transmission. Clear the
* flags so that this message can be sent.
*/
prl_tx[port].xmit_status = TCPC_TX_UNSET;
PDMSG_CLR_FLAG(port, PRL_FLAGS_TX_COMPLETE);
/* Pass message to PHY Layer */
tcpm_transmit(port, pdmsg[port].xmit_type, header,
pdmsg[port].chk_buf);
@ -623,7 +674,7 @@ static void prl_tx_wait_for_phy_response_run(const int port)
* State tch_wait_for_transmission_complete will
* inform policy engine of error
*/
pdmsg[port].status_flags |= PRL_FLAGS_TX_ERROR;
PDMSG_SET_FLAG(port, PRL_FLAGS_TX_ERROR);
/* Increment message id counter */
increment_msgid_counter(port);
@ -644,7 +695,7 @@ static void prl_tx_wait_for_phy_response_run(const int port)
/* Increment messageId counter */
increment_msgid_counter(port);
/* Inform Policy Engine Message was sent */
pdmsg[port].status_flags |= PRL_FLAGS_TX_COMPLETE;
PDMSG_SET_FLAG(port, PRL_FLAGS_TX_COMPLETE);
set_state_prl_tx(port, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
return;
}
@ -727,10 +778,9 @@ static void prl_hr_wait_for_request_entry(const int port)
static void prl_hr_wait_for_request_run(const int port)
{
if (prl_hr[port].flags & PRL_FLAGS_PE_HARD_RESET ||
prl_hr[port].flags & PRL_FLAGS_PORT_PARTNER_HARD_RESET) {
if (PRL_HR_CHK_FLAG(port, PRL_FLAGS_PE_HARD_RESET |
PRL_FLAGS_PORT_PARTNER_HARD_RESET))
set_state_prl_hr(port, PRL_HR_RESET_LAYER);
}
}
/*
@ -748,6 +798,25 @@ static void prl_hr_reset_layer_entry(const int port)
* PRL_Tx_Wait_For_Message_Request state.
*/
set_state_prl_tx(port, PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
tch[port].flags = 0;
rch[port].flags = 0;
pdmsg[port].flags = 0;
/* Reset message ids */
for (i = 0; i < NUM_XMIT_TYPES; i++) {
prl_rx[port].msg_id[i] = -1;
prl_tx[port].msg_id_counter[i] = 0;
}
/* Disable RX */
if (IS_ENABLED(CONFIG_USB_TYPEC_CTVPD) ||
IS_ENABLED(CONFIG_USB_TYPEC_VPD))
vpd_rx_enable(0);
else
tcpm_set_rx_enable(port, 0);
return;
}
static void prl_hr_reset_layer_run(const int port)
@ -756,7 +825,7 @@ static void prl_hr_reset_layer_run(const int port)
* Protocol Layer reset Complete &
* Hard Reset was initiated by Policy Engine
*/
if (prl_hr[port].flags & PRL_FLAGS_PE_HARD_RESET) {
if (PRL_HR_CHK_FLAG(port, PRL_FLAGS_PE_HARD_RESET)) {
/* Request PHY to perform a Hard Reset */
prl_send_ctrl_msg(port, TCPC_TX_HARD_RESET, 0);
set_state_prl_hr(port, PRL_HR_WAIT_FOR_PHY_HARD_RESET_COMPLETE);
@ -788,7 +857,7 @@ static void prl_hr_wait_for_phy_hard_reset_complete_run(const int port)
* Wait for hard reset from PHY
* or timeout
*/
if ((pdmsg[port].status_flags & PRL_FLAGS_TX_COMPLETE) ||
if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_COMPLETE) ||
(get_time().val > prl_hr[port].hard_reset_complete_timer)) {
/* PRL_HR_PHY_Hard_Reset_Requested */
@ -808,7 +877,7 @@ static void prl_hr_wait_for_pe_hard_reset_complete_run(const int port)
/*
* Wait for Hard Reset complete indication from Policy Engine
*/
if (prl_hr[port].flags & PRL_FLAGS_HARD_RESET_COMPLETE)
if (PRL_HR_CHK_FLAG(port, PRL_FLAGS_HARD_RESET_COMPLETE))
set_state_prl_hr(port, PRL_HR_WAIT_FOR_REQUEST);
}
@ -837,24 +906,19 @@ static void copy_chunk_to_ext(int port)
/*
* Chunked Rx State Machine
*/
static inline void rch_clear_abort_set_chunking(int port)
static void rch_wait_for_message_from_protocol_layer_entry(const int port)
{
/* Clear Abort flag */
pdmsg[port].status_flags &= ~PRL_FLAGS_ABORT;
PDMSG_CLR_FLAG(port, PRL_FLAGS_ABORT);
/* All Messages are chunked */
rch[port].flags = PRL_FLAGS_CHUNKING;
}
static void rch_wait_for_message_from_protocol_layer_entry(const int port)
{
rch_clear_abort_set_chunking(port);
}
static void rch_wait_for_message_from_protocol_layer_run(const int port)
{
if (rch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
rch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
if (RCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
RCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
/*
* Are we communicating with a PD3.0 device and is
* this an extended message?
@ -868,17 +932,17 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
* Received Extended Message &
* (Chunking = 1 & Chunked = 1)
*/
if ((rch[port].flags & PRL_FLAGS_CHUNKING) && chunked) {
if ((RCH_CHK_FLAG(port, PRL_FLAGS_CHUNKING)) &&
chunked) {
set_state_rch(port,
RCH_PROCESSING_EXTENDED_MESSAGE);
return;
}
/*
* (Received Extended Message &
* (Chunking = 0 & Chunked = 0))
*/
else if (!(rch[port].flags &
PRL_FLAGS_CHUNKING) && !chunked) {
else if (!RCH_CHK_FLAG(port, PRL_FLAGS_CHUNKING) &&
!chunked) {
/* Copy chunk to extended buffer */
copy_chunk_to_ext(port);
set_state_rch(port, RCH_PASS_UP_MESSAGE);
@ -888,7 +952,6 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
*/
else {
set_state_rch(port, RCH_REPORT_ERROR);
return;
}
}
/*
@ -905,7 +968,6 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
*/
else {
set_state_rch(port, RCH_REPORT_ERROR);
return;
}
}
}
@ -916,7 +978,7 @@ static void rch_wait_for_message_from_protocol_layer_run(const int port)
static void rch_pass_up_message_entry(const int port)
{
/* Pass Message to Policy Engine */
pe_pass_up_message(port);
pe_message_received(port);
set_state_rch(port, RCH_WAIT_FOR_MESSAGE_FROM_PROTOCOL_LAYER);
}
@ -951,9 +1013,9 @@ static void rch_processing_extended_message_run(const int port)
/*
* Abort Flag Set
*/
if (pdmsg[port].status_flags & PRL_FLAGS_ABORT) {
if (PDMSG_CHK_FLAG(port, PRL_FLAGS_ABORT))
set_state_rch(port, RCH_WAIT_FOR_MESSAGE_FROM_PROTOCOL_LAYER);
}
/*
* If expected Chunk Number:
* Append data to Extended_Message_Buffer
@ -1019,7 +1081,7 @@ static void rch_requesting_chunk_entry(const int port)
pdmsg[port].data_objs = 1;
pdmsg[port].ext = 1;
prl_tx[port].flags |= PRL_FLAGS_MSG_XMIT;
PRL_TX_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_TX, 0);
}
@ -1029,8 +1091,8 @@ static void rch_requesting_chunk_run(const int port)
* Transmission Error from Protocol Layer or
* Message Received From Protocol Layer
*/
if (rch[port].flags & PRL_FLAGS_MSG_RECEIVED ||
pdmsg[port].status_flags & PRL_FLAGS_TX_ERROR) {
if (RCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED) ||
PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_ERROR)) {
/*
* Leave PRL_FLAGS_MSG_RECEIVED flag set. It'll be
* cleared in rch_report_error state
@ -1040,8 +1102,8 @@ static void rch_requesting_chunk_run(const int port)
/*
* Message Transmitted received from Protocol Layer
*/
else if (pdmsg[port].status_flags & PRL_FLAGS_TX_COMPLETE) {
pdmsg[port].status_flags &= ~PRL_FLAGS_TX_COMPLETE;
else if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_COMPLETE)) {
PDMSG_CLR_FLAG(port, PRL_FLAGS_TX_COMPLETE);
set_state_rch(port, RCH_WAITING_CHUNK);
}
}
@ -1060,7 +1122,7 @@ static void rch_waiting_chunk_entry(const int port)
static void rch_waiting_chunk_run(const int port)
{
if ((rch[port].flags & PRL_FLAGS_MSG_RECEIVED)) {
if (RCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
/*
* Leave PRL_FLAGS_MSG_RECEIVED flag set just in case an error
* is detected. If an error is detected, PRL_FLAGS_MSG_RECEIVED
@ -1084,7 +1146,7 @@ static void rch_waiting_chunk_run(const int port)
* No error was detected, so clear
* PRL_FLAGS_MSG_RECEIVED flag.
*/
rch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
RCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
set_state_rch(port,
RCH_PROCESSING_EXTENDED_MESSAGE);
}
@ -1107,13 +1169,13 @@ static void rch_report_error_entry(const int port)
* If the state was entered because a message was received,
* this message is passed to the Policy Engine.
*/
if (rch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
rch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
if (RCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
RCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
/* Copy chunk to extended buffer */
copy_chunk_to_ext(port);
/* Pass Message to Policy Engine */
pe_pass_up_message(port);
pe_message_received(port);
/* Report error */
pe_report_error(port, ERR_RCH_MSG_REC);
} else {
@ -1133,7 +1195,7 @@ static void rch_report_error_run(const int port)
static inline void tch_clear_abort_set_chunking(int port)
{
/* Clear Abort flag */
pdmsg[port].status_flags &= ~PRL_FLAGS_ABORT;
PDMSG_CLR_FLAG(port, PRL_FLAGS_ABORT);
/* All Messages are chunked */
tch[port].flags = PRL_FLAGS_CHUNKING;
@ -1149,12 +1211,11 @@ static void tch_wait_for_message_request_from_pe_run(const int port)
/*
* Any message received and not in state TCH_Wait_Chunk_Request
*/
if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
set_state_tch(port, TCH_MESSAGE_RECEIVED);
return;
} else if (tch[port].flags & PRL_FLAGS_MSG_XMIT) {
tch[port].flags &= ~PRL_FLAGS_MSG_XMIT;
} else if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_XMIT)) {
TCH_CLR_FLAG(port, PRL_FLAGS_MSG_XMIT);
/*
* Rx Chunking State != RCH_Wait_For_Message_From_Protocol_Layer
* & Abort Supported
@ -1171,7 +1232,7 @@ static void tch_wait_for_message_request_from_pe_run(const int port)
* Extended Message Request & Chunking
*/
if ((pdmsg[port].rev == PD_REV30) && pdmsg[port].ext &&
(tch[port].flags & PRL_FLAGS_CHUNKING)) {
TCH_CHK_FLAG(port, PRL_FLAGS_CHUNKING)) {
pdmsg[port].send_offset = 0;
pdmsg[port].chunk_number_to_send = 0;
set_state_tch(port,
@ -1205,12 +1266,10 @@ static void tch_wait_for_message_request_from_pe_run(const int port)
pdmsg[port].data_objs =
(emsg[port].len + 3) >> 2;
/* Pass Message to Protocol Layer */
prl_tx[port].flags |= PRL_FLAGS_MSG_XMIT;
PRL_TX_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
set_state_tch(port,
TCH_WAIT_FOR_TRANSMISSION_COMPLETE);
}
return;
}
}
}
@ -1220,34 +1279,31 @@ static void tch_wait_for_message_request_from_pe_run(const int port)
*/
static void tch_wait_for_transmission_complete_run(const int port)
{
/*
* Any message received and not in state TCH_Wait_Chunk_Request
*/
if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
set_state_tch(port, TCH_MESSAGE_RECEIVED);
return;
}
/*
* Inform Policy Engine that Message was sent.
*/
if (pdmsg[port].status_flags & PRL_FLAGS_TX_COMPLETE) {
pdmsg[port].status_flags &= ~PRL_FLAGS_TX_COMPLETE;
set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_COMPLETE)) {
PDMSG_CLR_FLAG(port, PRL_FLAGS_TX_COMPLETE);
/* Tell PE message was sent */
pe_message_sent(port);
set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
}
/*
* Inform Policy Engine of Tx Error
*/
else if (pdmsg[port].status_flags & PRL_FLAGS_TX_ERROR) {
pdmsg[port].status_flags &= ~PRL_FLAGS_TX_ERROR;
else if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_ERROR)) {
PDMSG_CLR_FLAG(port, PRL_FLAGS_TX_ERROR);
/* Tell PE an error occurred */
pe_report_error(port, ERR_TCH_XMIT);
set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
}
/*
* Any message received and not in state TCH_Wait_Chunk_Request
*/
else if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
set_state_tch(port, TCH_MESSAGE_RECEIVED);
}
}
/*
@ -1262,11 +1318,12 @@ static void tch_construct_chunked_message_entry(const int port)
/*
* Any message received and not in state TCH_Wait_Chunk_Request
*/
if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
set_state_tch(port, TCH_MESSAGE_RECEIVED);
return;
}
/* Prepare to copy chunk into chk_buf */
ext_hdr = (uint16_t *)pdmsg[port].chk_buf;
@ -1297,13 +1354,12 @@ static void tch_construct_chunked_message_entry(const int port)
pdmsg[port].data_objs = (num + 2 + 3) >> 2;
/* Pass message chunk to Protocol Layer */
prl_tx[port].flags |= PRL_FLAGS_MSG_XMIT;
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
PRL_TX_SET_FLAG(port, PRL_FLAGS_MSG_XMIT);
}
static void tch_construct_chunked_message_run(const int port)
{
if (pdmsg[port].status_flags & PRL_FLAGS_ABORT)
if (PDMSG_CHK_FLAG(port, PRL_FLAGS_ABORT))
set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
else
set_state_tch(port, TCH_SENDING_CHUNKED_MESSAGE);
@ -1314,19 +1370,10 @@ static void tch_construct_chunked_message_run(const int port)
*/
static void tch_sending_chunked_message_run(const int port)
{
/*
* Any message received and not in state TCH_Wait_Chunk_Request
*/
if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
set_state_tch(port, TCH_MESSAGE_RECEIVED);
return;
}
/*
* Transmission Error
*/
if (pdmsg[port].status_flags & PRL_FLAGS_TX_ERROR) {
if (PDMSG_CHK_FLAG(port, PRL_FLAGS_TX_ERROR)) {
pe_report_error(port, ERR_TCH_XMIT);
set_state_tch(port, TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
}
@ -1340,6 +1387,13 @@ static void tch_sending_chunked_message_run(const int port)
/* Tell PE message was sent */
pe_message_sent(port);
}
/*
* Any message received and not in state TCH_Wait_Chunk_Request
*/
else if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
set_state_tch(port, TCH_MESSAGE_RECEIVED);
}
/*
* Message Transmitted from Protocol Layer &
* Not Last Chunk
@ -1362,8 +1416,8 @@ static void tch_wait_chunk_request_entry(const int port)
static void tch_wait_chunk_request_run(const int port)
{
if (tch[port].flags & PRL_FLAGS_MSG_RECEIVED) {
tch[port].flags &= ~PRL_FLAGS_MSG_RECEIVED;
if (TCH_CHK_FLAG(port, PRL_FLAGS_MSG_RECEIVED)) {
TCH_CLR_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
if (PD_HEADER_EXT(emsg[port].header)) {
uint16_t exthdr;
@ -1415,9 +1469,7 @@ static void tch_wait_chunk_request_run(const int port)
static void tch_message_received_entry(const int port)
{
/* Pass message to chunked Rx */
rch[port].flags |= PRL_FLAGS_MSG_RECEIVED;
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);
RCH_SET_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
}
static void tch_message_received_run(const int port)
@ -1447,6 +1499,16 @@ static void prl_rx_wait_for_phy_message(const int port, int evt)
msid = PD_HEADER_ID(header);
sop = PD_HEADER_GET_SOP(header);
/*
* Ignore messages sent to the cable from our
* port partner if we aren't a Vconn Powered Device.
*/
if (!IS_ENABLED(CONFIG_USB_TYPEC_CTVPD) &&
!IS_ENABLED(CONFIG_USB_TYPEC_VPD) &&
PD_HEADER_GET_SOP(header) != PD_MSG_SOP &&
PD_HEADER_PROLE(header) == PD_PLUG_DFP_UFP)
return;
if (cnt == 0 && type == PD_CTRL_SOFT_RESET) {
int i;
@ -1493,7 +1555,7 @@ static void prl_rx_wait_for_phy_message(const int port, int evt)
if (cnt == 0 && type == PD_CTRL_PING) {
/* NOTE: RTR_PING State embedded here. */
emsg[port].len = 0;
pe_pass_up_message(port);
pe_message_received(port);
return;
}
/*
@ -1506,7 +1568,7 @@ static void prl_rx_wait_for_phy_message(const int port, int evt)
* Send Message to Tx Chunk
* Chunk State Machine
*/
tch[port].flags |= PRL_FLAGS_MSG_RECEIVED;
TCH_SET_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
}
/*
* Message (not Ping) Received from
@ -1518,7 +1580,7 @@ static void prl_rx_wait_for_phy_message(const int port, int evt)
* Send Message to Rx
* Chunk State Machine
*/
rch[port].flags |= PRL_FLAGS_MSG_RECEIVED;
RCH_SET_FLAG(port, PRL_FLAGS_MSG_RECEIVED);
}
task_set_event(PD_PORT_TO_TASK_ID(port), PD_EVENT_SM, 0);

File diff suppressed because it is too large


@ -4918,4 +4918,22 @@
#endif
#endif /* CONFIG_ACCEL_FIFO */
/*
* If USB PD Discharge is enabled, verify that either CONFIG_USB_PD_DISCHARGE_GPIO
* (together with CONFIG_USB_PD_PORT_COUNT), CONFIG_USB_PD_DISCHARGE_TCPC, or
* CONFIG_USB_PD_DISCHARGE_PPC is defined.
*/
#ifdef CONFIG_USB_PD_DISCHARGE
#ifdef CONFIG_USB_PD_DISCHARGE_GPIO
#if !defined(CONFIG_USB_PD_PORT_COUNT)
#error "PD discharge port not defined"
#endif
#else
#if !defined(CONFIG_USB_PD_DISCHARGE_TCPC) && \
!defined(CONFIG_USB_PD_DISCHARGE_PPC)
#error "PD discharge implementation not defined"
#endif
#endif /* CONFIG_USB_PD_DISCHARGE_GPIO */
#endif /* CONFIG_USB_PD_DISCHARGE */
#endif /* __CROS_EC_CONFIG_H */
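
A hedged example of a board configuration that satisfies this new check (the values below are illustrative and not taken from this CL):

/* Illustrative board.h fragment: GPIO-based VBUS discharge */
#define CONFIG_USB_PD_DISCHARGE
#define CONFIG_USB_PD_DISCHARGE_GPIO
#define CONFIG_USB_PD_PORT_COUNT 2
/*
 * Boards without a discharge GPIO would instead define
 * CONFIG_USB_PD_DISCHARGE_TCPC or CONFIG_USB_PD_DISCHARGE_PPC.
 */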


@ -34,4 +34,42 @@ typec_current_t usb_get_typec_current_limit(enum pd_cc_polarity_type polarity,
enum pd_cc_polarity_type get_snk_polarity(enum tcpc_cc_voltage_status cc1,
enum tcpc_cc_voltage_status cc2);
/**
* Find the PDO index that offers the most power and stays within the
* max_mv voltage limit.
*
* @param src_cap_cnt number of source capability PDOs
* @param src_caps array of source capability PDOs
* @param max_mv maximum voltage (or -1 if no limit)
* @param selected_pdo raw PDO corresponding to the index, or index 0 on
* error (output)
* @return index of PDO within source cap packet
*/
int pd_find_pdo_index(uint32_t src_cap_cnt, const uint32_t * const src_caps,
int max_mv, uint32_t *selected_pdo);
/**
* Extract power information out of a Power Data Object (PDO)
*
* @param pdo raw pdo to extract
* @param ma current of the PDO (output)
* @param mv voltage of the PDO (output)
*/
void pd_extract_pdo_power(uint32_t pdo, uint32_t *ma, uint32_t *mv);
/**
* Decide which PDO to choose from the source capabilities.
*
* @param src_cap_cnt number of source capability PDOs
* @param src_caps array of source capability PDOs
* @param vpd_vdo VDO of the detected Charge-Through VPD, or 0 if none
* @param rdo requested Request Data Object (output)
* @param ma selected current limit (stored on success)
* @param mv selected supply voltage (stored on success)
* @param req_type request type
* @param max_request_mv max voltage a sink can request before getting
* source caps
*/
void pd_build_request(uint32_t src_cap_cnt, const uint32_t * const src_caps,
int32_t vpd_vdo, uint32_t *rdo, uint32_t *ma, uint32_t *mv,
enum pd_request_type req_type, uint32_t max_request_mv);
#endif /* __CROS_EC_USB_COMMON_H */


@ -160,6 +160,18 @@ enum pd_rx_errors {
#define BDO(mode, cnt) ((mode) | ((cnt) & 0xFFFF))
#define BIST_MODE(n) ((n) >> 28)
#define BIST_ERROR_COUNTER(n) ((n) & 0xffff)
#define BIST_RECEIVER_MODE 0
#define BIST_TRANSMIT_MODE 1
#define BIST_RETURNED_COUNTER 2
#define BIST_CARRIER_MODE_0 3
#define BIST_CARRIER_MODE_1 4
#define BIST_CARRIER_MODE_2 5
#define BIST_CARRIER_MODE_3 6
#define BIST_EYE_PATTERN 7
#define BIST_TEST_DATA 8
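
A small worked example of how these new mode values pair with the existing BDO()/BIST_MODE() macros above (illustrative only, not code from this CL):

/* Build a BIST Carrier Mode 2 BDO, then decode it back. */
uint32_t bdo = BDO(BIST_CARRIER_MODE_2 << 28, 0);
int mode = BIST_MODE(bdo);             /* 5, i.e. BIST_CARRIER_MODE_2 */
int err_cnt = BIST_ERROR_COUNTER(bdo); /* 0 */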
#define SVID_DISCOVERY_MAX 16
/* Timers */
@ -190,6 +202,7 @@ enum pd_rx_errors {
#define PD_T_NO_RESPONSE (5500*MSEC) /* between 4.5s and 5.5s */
#define PD_T_BIST_TRANSMIT (50*MSEC) /* 50ms (used for task_wait arg) */
#define PD_T_BIST_RECEIVE (60*MSEC) /* 60ms (max time to process bist) */
#define PD_T_BIST_CONT_MODE (60*MSEC) /* 30ms to 60ms */
#define PD_T_VCONN_SOURCE_ON (100*MSEC) /* 100ms */
#define PD_T_DRP_TRY (125*MSEC) /* between 75 and 150ms (monitor Vbus) */
#define PD_T_TRY_TIMEOUT (550*MSEC) /* between 550ms and 1100ms */
@ -201,6 +214,8 @@ enum pd_rx_errors {
#define PD_T_SWAP_SOURCE_START (25*MSEC) /* Min of 20ms */
#define PD_T_RP_VALUE_CHANGE (20*MSEC) /* 20ms */
#define PD_T_SRC_DISCONNECT (15*MSEC) /* 15ms */
#define PD_T_VCONN_STABLE (50*MSEC) /* 50ms */
#define PD_T_DISCOVER_IDENTITY (45*MSEC) /* between 40ms and 50ms */
/* number of edges and time window to detect CC line is not idle */
#define PD_RX_TRANSITION_COUNT 3
@ -308,7 +323,7 @@ struct pd_policy {
* VDO : Vendor Defined Message Object
* VDM object is minimum of VDM header + 6 additional data objects.
*/
#define VDO_HDR_SIZE 1
#define VDO_MAX_SIZE 7
#define VDM_VER10 0
@ -784,6 +799,11 @@ struct pd_cable {
#define PD_VDO_SVID_SVID0(vdo) ((vdo) >> 16)
#define PD_VDO_SVID_SVID1(vdo) ((vdo) & 0xffff)
#define VPD_VDO_MAX_VBUS(vdo) (((vdo) >> 15) & 0x3)
#define VPD_VDO_VBUS_IMP(vdo) (((vdo) >> 7) & 0x3f)
#define VPD_VDO_GND_IMP(vdo) (((vdo) >> 1) & 0x3f)
#define VPD_VDO_CTS(vdo) ((vdo) & 1)
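
These fields feed the CTVPD current-limit adjustment in pd_build_request() earlier in this CL. A worked example with made-up values:

/*
 * Illustrative numbers only. Suppose VPD_VDO_VBUS_IMP(vdo) = 20 and
 * VPD_VDO_GND_IMP(vdo) = 10. pd_build_request() then computes
 *   vpd_vbus_dcr = 20 << 1 = 40 and vpd_gnd_dcr = 10,
 * so an offer above 3000 mA is limited to
 *   *ma = 750000 / (150 + 40 + 10) = 3750 mA.
 */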
/*
* Google modes capabilities
* <31:8> : reserved
@ -936,6 +956,8 @@ struct pd_cable {
/* Other Vendor IDs */
#define USB_VID_APPLE 0x05ac
#define USB_PID1_APPLE 0x1012
#define USB_PID2_APPLE 0x1013
/* Timeout for message receive in microseconds */
#define USB_PD_RX_TMOUT_US 1800
@ -1225,15 +1247,22 @@ enum pd_rev_type {
};
/* Power role */
#define PD_ROLE_SINK 0
#define PD_ROLE_SOURCE 1
enum pd_power_role {
PD_ROLE_SINK,
PD_ROLE_SOURCE
};
/* Data role */
enum pd_data_role {
PD_ROLE_UFP,
PD_ROLE_DFP,
PD_ROLE_DISCONNECTED
};
/* Cable plug */
#define PD_PLUG_DFP_UFP 0
#define PD_PLUG_CABLE_VPD 1
/* Data role */
#define PD_ROLE_UFP 0
#define PD_ROLE_DFP 1
#define PD_ROLE_DISCONNECTED 2
/* Vconn role */
#define PD_ROLE_VCONN_OFF 0
#define PD_ROLE_VCONN_ON 1
@ -1285,6 +1314,7 @@ enum pd_rev_type {
*/
#define PD_HEADER_TYPE(header) ((header) & 0x1F)
#define PD_HEADER_ID(header) (((header) >> 9) & 7)
#define PD_HEADER_PROLE(header) (((header) >> 8) & 1)
#define PD_HEADER_REV(header) (((header) >> 6) & 3)
#define PD_HEADER_DROLE(header) (((header) >> 5) & 1)
@ -1369,17 +1399,6 @@ int pd_get_vdo_ver(int port);
#define pd_get_rev(n) PD_REV20
#define pd_get_vdo_ver(n) VDM_VER10
#endif
/**
* Decide which PDO to choose from the source capabilities.
*
* @param port USB-C port number
* @param rdo requested Request Data Object.
* @param ma selected current limit (stored on success)
* @param mv selected supply voltage (stored on success)
* @param req_type request type
*/
void pd_build_request(int port, uint32_t *rdo, uint32_t *ma, uint32_t *mv,
enum pd_request_type req_type);
/**
* Check if max voltage request is allowed (only used if
@ -1430,26 +1449,6 @@ void pd_prevent_low_power_mode(int port, int prevent);
*/
void pd_process_source_cap(int port, int cnt, uint32_t *src_caps);
/**
* Find PDO index that offers the most amount of power and stays within
* max_mv voltage.
*
* @param port USB-C port number
* @param max_mv maximum voltage (or -1 if no limit)
* @param pdo raw pdo corresponding to index, or index 0 on error (output)
* @return index of PDO within source cap packet
*/
int pd_find_pdo_index(int port, int max_mv, uint32_t *pdo);
/**
* Extract power information out of a Power Data Object (PDO)
*
* @param pdo raw pdo to extract
* @param ma current of the PDO (output)
* @param mv voltage of the PDO (output)
*/
void pd_extract_pdo_power(uint32_t pdo, uint32_t *ma, uint32_t *mv);
/**
* Reduce the sink power consumption to a minimum value.
*
@ -2209,6 +2208,25 @@ int pd_ts_dts_plugged(int port);
*/
int pd_capable(int port);
/**
* Returns the source caps list
*
* @param port USB-C port number
*/
const uint32_t * const pd_get_src_caps(int port);
/**
* Returns the number of source caps
*
* @param port USB-C port number
*/
uint8_t pd_get_src_cap_cnt(int port);
/**
* Returns the maximum request voltage before source caps are received
*
*/
uint32_t get_max_request_mv(void);
/**
* Return true if partner port is capable of communication over USB data


@ -10,6 +10,7 @@
#include "usb_sm.h"
/* Policy Engine Receive and Transmit Errors */
enum pe_error {
ERR_RCH_CHUNKED,
ERR_RCH_MSG_REC,
@ -17,6 +18,25 @@ enum pe_error {
ERR_TCH_XMIT,
};
/*
* Device Policy Manager Requests.
* NOTE: These are usually set by host commands from the AP.
*/
enum pe_dpm_request {
DPM_REQUEST_DR_SWAP = BIT(0),
DPM_REQUEST_PR_SWAP = BIT(1),
DPM_REQUEST_VCONN_SWAP = BIT(2),
DPM_REQUEST_GOTO_MIN = BIT(3),
DPM_REQUEST_SRC_CAP_CHANGE = BIT(4),
DPM_REQUEST_GET_SNK_CAPS = BIT(5),
DPM_REQUEST_SEND_PING = BIT(6),
DPM_REQUEST_SOURCE_CAP = BIT(7),
DPM_REQUEST_NEW_POWER_LEVEL = BIT(8),
DPM_REQUEST_DISCOVER_IDENTITY = BIT(9),
DPM_REQUEST_EXIT_DP_MODE = BIT(10),
DPM_REQUEST_SVDM = BIT(11),
};
/**
* Initialize the Policy Engine State Machine
*
@ -44,17 +64,17 @@ void pe_message_sent(int port);
* Informs the Policy Engine of an error.
*
* @param port USB-C port number
* @parm e error
* @param e error
*/
void pe_report_error(int port, enum pe_error e);
/**
* Informs the Policy Engine that a message has been received
* Called by the Protocol Layer to inform the Policy Engine
* that a message has been received.
*
* @param port USB-C port number
*/
void pe_pass_up_message(int port);
void pe_message_received(int port);
/**
* Informs the Policy Engine that a hard reset was received.
@ -77,5 +97,71 @@ void pe_got_soft_reset(int port);
*/
void pe_hard_reset_sent(int port);
/**
* Exit DP mode
*
* @param port USB-C port number
*/
void pe_exit_dp_mode(int port);
/**
* Get the id of the current Policy Engine state
*
* @param port USB-C port number
*/
enum pe_states pe_get_state_id(int port);
/**
* Indicates if the Policy Engine State Machine is running.
*
* @param port USB-C port number
* @return 1 if policy engine state machine is running, else 0
*/
int pe_is_running(int port);
/**
* Informs the Policy Engine that the Power Supply is at its default state
*
* @param port USB-C port number
*/
void pe_ps_reset_complete(int port);
/**
* Informs the Policy Engine that a VCONN Swap has completed
*
* @param port USB-C port number
*/
void pe_vconn_swap_complete(int port);
/**
* Instructs the Policy Engine to send a Vendor Defined Message
*
* @param port USB-C port number
* @param vid Vendor ID
* @param cmd Vendor Defined Command
* @param data Vendor Defined Data
* @param count Size of Vendor Defined Data in 32-bit objects
*/
void pe_send_vdm(int port, uint32_t vid, int cmd, const uint32_t *data,
int count);
/**
* Indicates if an explicit contract is in place
*
* @param port USB-C port number
* @return 1 if an explicit contract is in place, else 0
*/
int pe_is_explicit_contract(int port);
/**
* Instruct the Policy Engine to perform a Device Policy Manager Request
* This function is called from the Device Policy Manager and only has effect
* if the current Policy Engine state is Src.Ready or Snk.Ready.
*
* @param port USB-C port number
* @param req Device Policy Manager Request
*/
void pe_dpm_request(int port, enum pe_dpm_request req);
#endif /* __CROS_EC_USB_PE_H */
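
A minimal usage sketch for the new Device Policy Manager request path (the wrapper name is hypothetical; per the commit message, host commands arrive in later CLs):

/* Hypothetical helper: ask the Policy Engine to run a Data Role Swap. */
static void board_request_dr_swap(int port)
{
	/* Only acted on when the Policy Engine is in Src.Ready or Snk.Ready */
	pe_dpm_request(port, DPM_REQUEST_DR_SWAP);
}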


@ -17,6 +17,21 @@
*/
#define N_RETRY_COUNT 2
/**
* Returns true if Protocol Layer State Machine is in run mode
*
* @param port USB-C port number
* @return 1 if state machine is running, else 0
*/
int prl_is_running(int port);
/**
* Resets the Protocol Layer State Machine
*
* @param port USB-C port number
*/
void prl_reset(int port);
/**
* Runs the Protocol Layer State Machine
*


@ -31,6 +31,22 @@
*/
#define TYPE_C_AUDIO_ACC_CURRENT 500 /* mA */
/**
* Returns true if TypeC State machine is in attached source state.
*
* @param port USB-C port number
* @return 1 if in attached source state, else 0
*/
int tc_is_attached_src(int port);
/**
* Returns true if TypeC State machine is in attached sink state.
*
* @param port USB-C port number
* @return 1 if in attached sink state, else 0
*/
int tc_is_attached_snk(int port);
/**
* Get current data role
*
@ -72,6 +88,14 @@ uint8_t tc_get_pd_enabled(int port);
*/
void tc_set_power_role(int port, int role);
/**
* Set the data role
*
* @param port USB-C port number
* @param role data role
*/
void tc_set_data_role(int port, int role);
/**
* Sets the USB Mux depending on current data role
* Mux is connected except when:
@ -90,6 +114,51 @@ void set_usb_mux_with_current_data_role(int port);
*/
uint64_t tc_get_timeout(int port);
/**
* Policy Engine informs the Type-C state machine if the port partner
* is dualrole power.
*
* @param port USB_C port number
* @param en 1 if port partner is dualrole power, else 0
*/
void tc_partner_dr_power(int port, int en);
/**
* Policy Engine informs the Type-C state machine if the port partner
* has external power
*
* @param port USB_C port number
* @param en 1 if port partner has external power, else 0
*/
void tc_partner_extpower(int port, int en);
/**
* Policy Engine informs the Type-C state machine if the port partner
* is USB comms.
*
* @param port USB_C port number
* @param en 1 if port partner is USB comms, else 0
*/
void tc_partner_usb_comm(int port, int en);
/**
* Policy Engine informs the Type-C state machine if the port partner
* is dualrole data.
*
* @param port USB_C port number
* @param en 1 if port partner is dualrole data, else 0
*/
void tc_partner_dr_data(int port, int en);
/**
* Policy Engine informs the Type-C state machine if the port partner
* had a previous pd connection
*
* @param port USB_C port number
* @param en 1 if port partner had a previous pd connection, else 0
*/
void tc_pd_connection(int port, int en);
/**
* Set loop timeout value
*
@ -98,6 +167,120 @@ uint64_t tc_get_timeout(int port);
*/
void tc_set_timeout(int port, uint64_t timeout);
/**
* Initiates a Power Role Swap from Attached.SRC to Attached.SNK. This function
* has no effect if the current Type-C state is not Attached.SRC.
*
* @param port USB_C port number
*/
void tc_prs_src_snk_assert_rd(int port);
/**
* Initiates a Power Role Swap from Attached.SNK to Attached.SRC. This function
* has no effect if the current Type-C state is not Attached.SNK.
*
* @param port USB_C port number
*/
void tc_prs_snk_src_assert_rp(int port);
/**
* Informs the Type-C State Machine that a Power Role Swap is complete.
* This function is called from the Policy Engine.
*
* @param port USB_C port number
*/
void tc_pr_swap_complete(int port);
/**
* Informs the Type-C State Machine that a Discover Identity is in progress.
* This function is called from the Policy Engine.
*
* @param port USB_C port number
*/
void tc_disc_ident_in_progress(int port);
/**
* Informs the Type-C State Machine that a Discover Identity is complete.
* This function is called from the Policy Engine.
*
* @param port USB_C port number
*/
void tc_disc_ident_complete(int port);
/**
* Instructs the Attached.SNK to stop drawing power. This function is called
* from the Policy Engine and only has effect if the current Type-C state is
* Attached.SNK.
*
* @param port USB_C port number
*/
void tc_snk_power_off(int port);
/**
* Instructs the Attached.SRC to stop supplying power. The function has
* no effect if the current Type-C state is not Attached.SRC.
*
* @param port USB_C port number
*/
void tc_src_power_off(int port);
/**
* Instructs the Attached.SRC to start supplying power. The function has
* no effect if the current Type-C state is not Attached.SRC.
*
* @param port USB_C port number
*/
int tc_src_power_on(int port);
/**
* Tests if a VCONN Swap is possible.
*
* @param port USB_C port number
* @return 1 if vconn swap is possible, else 0
*/
int tc_check_vconn_swap(int port);
#ifdef CONFIG_USBC_VCONN
/**
* Checks if VCONN is being sourced.
*
* @param port USB_C port number
* @return 1 if vconn is being sourced, 0 if it's not, and -1 if
* can't answer at this time. -1 is returned if the current
* Type-C state is not Attached.SRC or Attached.SNK.
*/
int tc_is_vconn_src(int port);
/**
* Instructs the Attached.SRC or Attached.SNK to start sourcing VCONN.
* This function is called from the Policy Engine and only has effect
* if the current Type-C state is Attached.SRC or Attached.SNK.
*
* @param port USB_C port number
*/
void pd_request_vconn_swap_on(int port);
/**
* Instructs the Attached.SRC or Attached.SNK to stop sourcing VCONN.
* This function is called from the Policy Engine and only has effect
* if the current Type-C state is Attached.SRC or Attached.SNK.
*
* @param port USB_C port number
*/
void pd_request_vconn_swap_off(int port);
#endif
/**
* Returns the polarity of a Sink.
*
* @param cc1 value of CC1 set by tcpm_get_cc
* @param cc2 value of CC2 set by tcpm_get_cc
* @return 0 if cc1 is connected, else 1 for cc2
*/
enum pd_cc_polarity_type get_snk_polarity(enum tcpc_cc_voltage_status cc1,
enum tcpc_cc_voltage_status cc2);
/**
* Restarts the TCPC
*
@ -138,7 +321,29 @@ void tc_event_check(int port, int evt);
*/
void tc_run(const int port);
/**
* Attempt to activate VCONN
*
* @param port USB-C port number
*/
void tc_vconn_on(int port);
/**
* Start error recovery
*
* @param port USB-C port number
*/
void tc_start_error_recovery(int port);
/**
* Hard Reset the TypeC port
*
* @param port USB-C port number
*/
void tc_hard_reset(int port);
#ifdef CONFIG_USB_TYPEC_CTVPD
/**
* Resets the charge-through support timer. This can be
* called many times but the support timer will only
@ -148,6 +353,12 @@ void tc_run(const int port);
*/
void tc_reset_support_timer(int port);
#else
/**
* Informs the Type-C State Machine that a Charge-Through Vconn Powered
* Device (CTVPD) was detected.
*
* @param port USB-C port number
*/
void tc_ctvpd_detected(int port);
#endif /* CONFIG_USB_TYPEC_CTVPD */
#endif /* __CROS_EC_USB_TC_H */


@ -102,7 +102,7 @@ static struct pd_prl {
int mock_pe_error;
int mock_pe_hard_reset_sent;
int mock_pe_got_hard_reset;
int mock_pe_pass_up_message;
int mock_pe_message_received;
int mock_got_soft_reset;
} pd_port[CONFIG_USB_PD_PORT_COUNT];
@ -114,6 +114,7 @@ static void init_port(int port, int rev)
pd_port[port].data_role = PD_ROLE_UFP;
pd_port[port].msg_tx_id = 0;
pd_port[port].msg_rx_id = 0;
tcpm_init(port);
tcpm_set_polarity(port, 0);
tcpm_set_rx_enable(port, 0);
@ -224,7 +225,7 @@ static int verify_data_reception(int port, uint16_t header, int len)
if (pd_port[port].mock_pe_error >= 0)
return 0;
if (!pd_port[port].mock_pe_pass_up_message)
if (!pd_port[port].mock_pe_message_received)
return 0;
if (emsg[port].header != header)
@ -255,7 +256,7 @@ static int verify_chunk_data_reception(int port, uint16_t header, int len)
if (pd_port[port].mock_got_soft_reset)
return 0;
if (!pd_port[port].mock_pe_pass_up_message)
if (!pd_port[port].mock_pe_message_received)
return 0;
if (pd_port[port].mock_pe_error >= 0)
@ -282,7 +283,7 @@ static int simulate_receive_data(int port, enum pd_data_msg_type msg_type,
nw, pd_port[port].rev, 0);
pd_port[port].mock_pe_error = -1;
pd_port[port].mock_pe_pass_up_message = 0;
pd_port[port].mock_pe_message_received = 0;
emsg[port].header = 0;
emsg[port].len = 0;
memset(emsg[port].buf, 0, 260);
@ -322,7 +323,7 @@ static int simulate_receive_extended_data(int port,
int req_timeout;
pd_port[port].mock_pe_error = -1;
pd_port[port].mock_pe_pass_up_message = 0;
pd_port[port].mock_pe_message_received = 0;
emsg[port].header = 0;
emsg[port].len = 0;
memset(emsg[port].buf, 0, 260);
@ -354,7 +355,7 @@ static int simulate_receive_extended_data(int port,
if (pd_port[port].mock_pe_error >= 0)
return 0;
if (pd_port[port].mock_pe_pass_up_message)
if (pd_port[port].mock_pe_message_received)
return 0;
if (emsg[port].len != 0)
@ -699,9 +700,9 @@ void pe_got_hard_reset(int port)
pd_port[port].mock_pe_got_hard_reset = 1;
}
void pe_pass_up_message(int port)
void pe_message_received(int port)
{
pd_port[port].mock_pe_pass_up_message = 1;
pd_port[port].mock_pe_message_received = 1;
}
void pe_message_sent(int port)
@ -719,12 +720,14 @@ void pe_got_soft_reset(int port)
pd_port[port].mock_got_soft_reset = 1;
}
static int test_initial_states(void)
static int test_prl_reset(void)
{
int port = PORT0;
enable_prl(port, 1);
prl_reset(port);
TEST_ASSERT(prl_tx_get_state(port) ==
PRL_TX_WAIT_FOR_MESSAGE_REQUEST);
TEST_ASSERT(rch_get_state(port) ==
@ -733,6 +736,7 @@ static int test_initial_states(void)
TCH_WAIT_FOR_MESSAGE_REQUEST_FROM_PE);
TEST_ASSERT(prl_hr_get_state(port) ==
PRL_HR_WAIT_FOR_REQUEST);
enable_prl(port, 0);
return EC_SUCCESS;
}
@ -1047,7 +1051,7 @@ static int test_receive_soft_reset_msg(void)
pd_port[port].mock_got_soft_reset = 0;
pd_port[port].mock_pe_error = -1;
pd_port[port].mock_pe_pass_up_message = 0;
pd_port[port].mock_pe_message_received = 0;
TEST_ASSERT(simulate_receive_ctrl_msg(port, PD_CTRL_SOFT_RESET));
@ -1058,7 +1062,7 @@ static int test_receive_soft_reset_msg(void)
TEST_ASSERT(pd_port[port].mock_got_soft_reset);
TEST_ASSERT(pd_port[port].mock_pe_error < 0);
TEST_ASSERT(pd_port[port].mock_pe_pass_up_message);
TEST_ASSERT(pd_port[port].mock_pe_message_received);
TEST_ASSERT(expected_header == emsg[port].header);
TEST_ASSERT(emsg[port].len == 0);
@ -1090,7 +1094,7 @@ static int test_receive_control_msg(void)
pd_port[port].mock_got_soft_reset = 0;
pd_port[port].mock_pe_error = -1;
pd_port[port].mock_pe_pass_up_message = 0;
pd_port[port].mock_pe_message_received = 0;
TEST_ASSERT(simulate_receive_ctrl_msg(port, PD_CTRL_DR_SWAP));
@ -1101,7 +1105,7 @@ static int test_receive_control_msg(void)
TEST_ASSERT(!pd_port[port].mock_got_soft_reset);
TEST_ASSERT(pd_port[port].mock_pe_error < 0);
TEST_ASSERT(pd_port[port].mock_pe_pass_up_message);
TEST_ASSERT(pd_port[port].mock_pe_message_received);
TEST_ASSERT(expected_header == emsg[port].header);
TEST_ASSERT(emsg[port].len == 0);
@ -1317,7 +1321,7 @@ void run_test(void)
/* Test PD 2.0 Protocol */
init_port(PORT0, PD_REV20);
RUN_TEST(test_initial_states);
RUN_TEST(test_prl_reset);
RUN_TEST(test_send_ctrl_msg);
RUN_TEST(test_send_ctrl_msg_with_retry_and_fail);
RUN_TEST(test_send_ctrl_msg_with_retry_and_success);
@ -1334,7 +1338,7 @@ void run_test(void)
/* Test PD 3.0 Protocol */
init_port(PORT0, PD_REV30);
RUN_TEST(test_initial_states);
RUN_TEST(test_prl_reset);
RUN_TEST(test_send_ctrl_msg);
RUN_TEST(test_send_ctrl_msg_with_retry_and_fail);
RUN_TEST(test_send_ctrl_msg_with_retry_and_success);