Diffstat (limited to 'net/bluetooth/hci_request.c')
-rw-r--r--  net/bluetooth/hci_request.c | 284
1 file changed, 158 insertions, 126 deletions
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index fa9125b782f8..f15626607b2d 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -745,17 +745,17 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
}
}
-static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
- u8 bdaddr_type)
+static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
+ u8 bdaddr_type)
{
- struct hci_cp_le_del_from_white_list cp;
+ struct hci_cp_le_del_from_accept_list cp;
cp.bdaddr_type = bdaddr_type;
bacpy(&cp.bdaddr, bdaddr);
- bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
+ bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
cp.bdaddr_type);
- hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
if (use_ll_privacy(req->hdev) &&
hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
@@ -774,31 +774,31 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
}
}
-/* Adds connection to white list if needed. On error, returns -1. */
-static int add_to_white_list(struct hci_request *req,
- struct hci_conn_params *params, u8 *num_entries,
- bool allow_rpa)
+/* Adds connection to accept list if needed. On error, returns -1. */
+static int add_to_accept_list(struct hci_request *req,
+ struct hci_conn_params *params, u8 *num_entries,
+ bool allow_rpa)
{
- struct hci_cp_le_add_to_white_list cp;
+ struct hci_cp_le_add_to_accept_list cp;
struct hci_dev *hdev = req->hdev;
- /* Already in white list */
- if (hci_bdaddr_list_lookup(&hdev->le_white_list, &params->addr,
+ /* Already in accept list */
+ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
params->addr_type))
return 0;
/* Select filter policy to accept all advertising */
- if (*num_entries >= hdev->le_white_list_size)
+ if (*num_entries >= hdev->le_accept_list_size)
return -1;
- /* White list can not be used with RPAs */
+ /* Accept list can not be used with RPAs */
if (!allow_rpa &&
!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
return -1;
}
- /* During suspend, only wakeable devices can be in whitelist */
+ /* During suspend, only wakeable devices can be in accept list */
if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
params->current_flags))
return 0;
@@ -807,9 +807,9 @@ static int add_to_white_list(struct hci_request *req,
cp.bdaddr_type = params->addr_type;
bacpy(&cp.bdaddr, &params->addr);
- bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
+ bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr,
cp.bdaddr_type);
- hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
if (use_ll_privacy(hdev) &&
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
@@ -837,15 +837,15 @@ static int add_to_white_list(struct hci_request *req,
return 0;
}
-static u8 update_white_list(struct hci_request *req)
+static u8 update_accept_list(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
struct hci_conn_params *params;
struct bdaddr_list *b;
u8 num_entries = 0;
bool pend_conn, pend_report;
- /* We allow whitelisting even with RPAs in suspend. In the worst case,
- * we won't be able to wake from devices that use the privacy1.2
+ /* We allow usage of accept list even with RPAs in suspend. In the worst
+ * case, we won't be able to wake from devices that use the privacy1.2
* features. Additionally, once we support privacy1.2 and IRK
* offloading, we can update this to also check for those conditions.
*/
@@ -855,13 +855,13 @@ static u8 update_white_list(struct hci_request *req)
hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
allow_rpa = true;
- /* Go through the current white list programmed into the
+ /* Go through the current accept list programmed into the
* controller one by one and check if that address is still
* in the list of pending connections or list of devices to
* report. If not present in either list, then queue the
* command to remove it from the controller.
*/
- list_for_each_entry(b, &hdev->le_white_list, list) {
+ list_for_each_entry(b, &hdev->le_accept_list, list) {
pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
&b->bdaddr,
b->bdaddr_type);
@@ -870,14 +870,14 @@ static u8 update_white_list(struct hci_request *req)
b->bdaddr_type);
/* If the device is not likely to connect or report,
- * remove it from the whitelist.
+ * remove it from the accept list.
*/
if (!pend_conn && !pend_report) {
- del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
+ del_from_accept_list(req, &b->bdaddr, b->bdaddr_type);
continue;
}
- /* White list can not be used with RPAs */
+ /* Accept list can not be used with RPAs */
if (!allow_rpa &&
!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
@@ -887,27 +887,27 @@ static u8 update_white_list(struct hci_request *req)
num_entries++;
}
- /* Since all no longer valid white list entries have been
+ /* Since all no longer valid accept list entries have been
* removed, walk through the list of pending connections
* and ensure that any new device gets programmed into
* the controller.
*
* If the list of the devices is larger than the list of
- * available white list entries in the controller, then
+ * available accept list entries in the controller, then
* just abort and return filter policy value to not use the
- * white list.
+ * accept list.
*/
list_for_each_entry(params, &hdev->pend_le_conns, action) {
- if (add_to_white_list(req, params, &num_entries, allow_rpa))
+ if (add_to_accept_list(req, params, &num_entries, allow_rpa))
return 0x00;
}
/* After adding all new pending connections, walk through
* the list of pending reports and also add these to the
- * white list if there is still space. Abort if space runs out.
+ * accept list if there is still space. Abort if space runs out.
*/
list_for_each_entry(params, &hdev->pend_le_reports, action) {
- if (add_to_white_list(req, params, &num_entries, allow_rpa))
+ if (add_to_accept_list(req, params, &num_entries, allow_rpa))
return 0x00;
}
@@ -921,7 +921,7 @@ static u8 update_white_list(struct hci_request *req)
hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
return 0x00;
- /* Select filter policy to use white list */
+ /* Select filter policy to use accept list */
return 0x01;
}
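
update_accept_list() computes the filter_policy byte that hci_req_add_le_passive_scan() later hands to hci_req_start_scan(): 0x01 when every pending connection and report device could be programmed into the controller's accept list, 0x00 when the host must fall back to receiving all advertising reports. A compact userspace model of that decision follows; every type and helper in it is an illustrative stand-in, not the kernel API.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical per-device view; in the kernel this information comes
 * from hci_conn_params and hdev flags. */
struct device_entry {
        bool in_accept_list;      /* hci_bdaddr_list_lookup() already hits  */
        bool rpa_unresolvable;    /* uses an RPA and no LL-privacy offload  */
        bool wakeable;            /* HCI_CONN_FLAG_REMOTE_WAKEUP is set     */
};

/* Mirrors add_to_accept_list(): 0 = handled (programmed or skipped),
 * -1 = the accept list cannot represent this device at all. */
static int model_add(const struct device_entry *d, unsigned int *entries,
                     unsigned int capacity, bool allow_rpa, bool suspended)
{
        if (d->in_accept_list)
                return 0;
        if (*entries >= capacity)
                return -1;                /* controller list is full        */
        if (!allow_rpa && d->rpa_unresolvable)
                return -1;                /* host must see all reports      */
        if (suspended && !d->wakeable)
                return 0;                 /* skipped while suspended        */
        (*entries)++;                     /* HCI_OP_LE_ADD_TO_ACCEPT_LIST   */
        return 0;
}

/* Mirrors the tail of update_accept_list(): any -1 above forces filter
 * policy 0x00 (accept all); otherwise the accept list is used (0x01). */
static uint8_t model_filter_policy(const struct device_entry *devs, int n,
                                   unsigned int capacity, bool allow_rpa,
                                   bool suspended)
{
        unsigned int entries = 0;
        int i;

        for (i = 0; i < n; i++)
                if (model_add(&devs[i], &entries, capacity, allow_rpa,
                              suspended))
                        return 0x00;

        return 0x01;
}

The kernel version additionally returns 0x00 while interleave scanning is not in its allowlist phase, as the check at the top of this hunk shows, and it first removes stale controller entries before adding new ones.
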
@@ -932,7 +932,7 @@ static bool scan_use_rpa(struct hci_dev *hdev)
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
u16 window, u8 own_addr_type, u8 filter_policy,
- bool addr_resolv)
+ bool filter_dup, bool addr_resolv)
{
struct hci_dev *hdev = req->hdev;
@@ -997,7 +997,7 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
ext_enable_cp.enable = LE_SCAN_ENABLE;
- ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ ext_enable_cp.filter_dup = filter_dup;
hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
sizeof(ext_enable_cp), &ext_enable_cp);
@@ -1016,7 +1016,7 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
memset(&enable_cp, 0, sizeof(enable_cp));
enable_cp.enable = LE_SCAN_ENABLE;
- enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ enable_cp.filter_dup = filter_dup;
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
&enable_cp);
}
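
The two hunks above thread the new filter_dup argument of hci_req_start_scan() into both the extended and the legacy scan-enable commands, which previously hard-coded LE_SCAN_FILTER_DUP_ENABLE. A trimmed sketch of where that byte ends up; the struct is reduced to the two fields touched here (the real extended command also carries more fields), so treat the layout as an assumption rather than the kernel definition.

#include <stdint.h>
#include <string.h>

#define LE_SCAN_ENABLE             0x01
#define LE_SCAN_FILTER_DUP_ENABLE  0x01
#define LE_SCAN_FILTER_DUP_DISABLE 0x00

/* Reduced view of the LE scan-enable parameters touched by this patch. */
struct scan_enable {
        uint8_t enable;
        uint8_t filter_dup;
};

static void build_scan_enable(struct scan_enable *cp, uint8_t filter_dup)
{
        memset(cp, 0, sizeof(*cp));
        cp->enable = LE_SCAN_ENABLE;
        cp->filter_dup = filter_dup;      /* caller's choice, no longer fixed */
}
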
@@ -1053,6 +1053,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
u8 own_addr_type;
u8 filter_policy;
u16 window, interval;
+ /* Default is to enable duplicates filter */
+ u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
/* Background scanning should run with address resolution */
bool addr_resolv = true;
@@ -1076,20 +1078,20 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
return;
bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
- /* Adding or removing entries from the white list must
+ /* Adding or removing entries from the accept list must
* happen before enabling scanning. The controller does
- * not allow white list modification while scanning.
+ * not allow accept list modification while scanning.
*/
- filter_policy = update_white_list(req);
+ filter_policy = update_accept_list(req);
/* When the controller is using random resolvable addresses and
* with that having LE privacy enabled, then controllers with
* Extended Scanner Filter Policies support can now enable support
* for handling directed advertising.
*
- * So instead of using filter polices 0x00 (no whitelist)
- * and 0x01 (whitelist enabled) use the new filter policies
- * 0x02 (no whitelist) and 0x03 (whitelist enabled).
+ * So instead of using filter polices 0x00 (no accept list)
+ * and 0x01 (accept list enabled) use the new filter policies
+ * 0x02 (no accept list) and 0x03 (accept list enabled).
*/
if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
@@ -1106,14 +1108,30 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
} else if (hci_is_adv_monitoring(hdev)) {
window = hdev->le_scan_window_adv_monitor;
interval = hdev->le_scan_int_adv_monitor;
+
+ /* Disable duplicates filter when scanning for advertisement
+ * monitor for the following reasons.
+ *
+ * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
+ * controllers ignore RSSI_Sampling_Period when the duplicates
+ * filter is enabled.
+ *
+ * For SW pattern filtering, when we're not doing interleaved
+ * scanning, it is necessary to disable duplicates filter,
+ * otherwise hosts can only receive one advertisement and it's
+ * impossible to know if a peer is still in range.
+ */
+ filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
} else {
window = hdev->le_scan_window;
interval = hdev->le_scan_interval;
}
- bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
+ bt_dev_dbg(hdev, "LE passive scan with accept list = %d",
+ filter_policy);
hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
- own_addr_type, filter_policy, addr_resolv);
+ own_addr_type, filter_policy, filter_dup,
+ addr_resolv);
}
static bool adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
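
With the plumbing in place, passive scanning keeps the duplicates filter enabled by default and disables it only while an advertisement monitor is active, for the hardware- and software-filtering reasons spelled out in the comment above. A one-line model of that policy; the helper name is illustrative, the patch open-codes the assignment.

#include <stdbool.h>
#include <stdint.h>

#define LE_SCAN_FILTER_DUP_ENABLE  0x01
#define LE_SCAN_FILTER_DUP_DISABLE 0x00

/* With the filter on, a peer that keeps advertising yields a single
 * report, so a monitor cannot tell whether the peer is still in range,
 * and some offload controllers ignore RSSI_Sampling_Period. */
static uint8_t passive_scan_filter_dup(bool adv_monitoring)
{
        return adv_monitoring ? LE_SCAN_FILTER_DUP_DISABLE
                              : LE_SCAN_FILTER_DUP_ENABLE;
}

The same choice is repeated for active scanning further down in active_scan().
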
@@ -1163,7 +1181,7 @@ static void hci_req_set_event_filter(struct hci_request *req)
/* Always clear event filter when starting */
hci_req_clear_event_filter(req);
- list_for_each_entry(b, &hdev->whitelist, list) {
+ list_for_each_entry(b, &hdev->accept_list, list) {
if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
b->current_flags))
continue;
@@ -1502,13 +1520,14 @@ static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
if (hci_conn_num(hdev, LE_LINK) == 0)
return true;
- /* Check le_states if there is any connection in slave role. */
- if (hdev->conn_hash.le_num_slave > 0) {
- /* Slave connection state and non connectable mode bit 20. */
+ /* Check le_states if there is any connection in peripheral role. */
+ if (hdev->conn_hash.le_num_peripheral > 0) {
+ /* Peripheral connection state and non connectable mode bit 20.
+ */
if (!connectable && !(hdev->le_states[2] & 0x10))
return false;
- /* Slave connection state and connectable mode bit 38
+ /* Peripheral connection state and connectable mode bit 38
* and scannable bit 21.
*/
if (connectable && (!(hdev->le_states[4] & 0x40) ||
@@ -1516,13 +1535,13 @@ static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
return false;
}
- /* Check le_states if there is any connection in master role. */
- if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
- /* Master connection state and non connectable mode bit 18. */
+ /* Check le_states if there is any connection in central role. */
+ if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
+ /* Central connection state and non connectable mode bit 18. */
if (!connectable && !(hdev->le_states[2] & 0x02))
return false;
- /* Master connection state and connectable mode bit 35 and
+ /* Central connection state and connectable mode bit 35 and
* scannable 19.
*/
if (connectable && (!(hdev->le_states[4] & 0x08) ||
@@ -1697,30 +1716,33 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
return;
if (ext_adv_capable(hdev)) {
- struct hci_cp_le_set_ext_scan_rsp_data cp;
+ struct {
+ struct hci_cp_le_set_ext_scan_rsp_data cp;
+ u8 data[HCI_MAX_EXT_AD_LENGTH];
+ } pdu;
- memset(&cp, 0, sizeof(cp));
+ memset(&pdu, 0, sizeof(pdu));
if (instance)
len = create_instance_scan_rsp_data(hdev, instance,
- cp.data);
+ pdu.data);
else
- len = create_default_scan_rsp_data(hdev, cp.data);
+ len = create_default_scan_rsp_data(hdev, pdu.data);
if (hdev->scan_rsp_data_len == len &&
- !memcmp(cp.data, hdev->scan_rsp_data, len))
+ !memcmp(pdu.data, hdev->scan_rsp_data, len))
return;
- memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+ memcpy(hdev->scan_rsp_data, pdu.data, len);
hdev->scan_rsp_data_len = len;
- cp.handle = instance;
- cp.length = len;
- cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ pdu.cp.handle = instance;
+ pdu.cp.length = len;
+ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
- hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
- &cp);
+ hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
+ sizeof(pdu.cp) + len, &pdu.cp);
} else {
struct hci_cp_le_set_scan_rsp_data cp;
@@ -1843,26 +1865,30 @@ void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
return;
if (ext_adv_capable(hdev)) {
- struct hci_cp_le_set_ext_adv_data cp;
+ struct {
+ struct hci_cp_le_set_ext_adv_data cp;
+ u8 data[HCI_MAX_EXT_AD_LENGTH];
+ } pdu;
- memset(&cp, 0, sizeof(cp));
+ memset(&pdu, 0, sizeof(pdu));
- len = create_instance_adv_data(hdev, instance, cp.data);
+ len = create_instance_adv_data(hdev, instance, pdu.data);
/* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
- memcmp(cp.data, hdev->adv_data, len) == 0)
+ memcmp(pdu.data, hdev->adv_data, len) == 0)
return;
- memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ memcpy(hdev->adv_data, pdu.data, len);
hdev->adv_data_len = len;
- cp.length = len;
- cp.handle = instance;
- cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ pdu.cp.length = len;
+ pdu.cp.handle = instance;
+ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
- hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
+ hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA,
+ sizeof(pdu.cp) + len, &pdu.cp);
} else {
struct hci_cp_le_set_adv_data cp;
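
Both the scan-response and advertising-data hunks above switch from a fixed-size command struct to a local wrapper that places a data buffer immediately after the command header, so hci_req_add() is given only sizeof(pdu.cp) + len bytes instead of the full maximum-size structure, and the cached copy in hdev is likewise limited to len bytes. A self-contained sketch of the pattern; cmd_hdr, send_cmd() and the 251-byte limit are stand-ins for the HCI command structs, hci_req_add() and HCI_MAX_EXT_AD_LENGTH.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MAX_DATA 251                      /* assumed HCI_MAX_EXT_AD_LENGTH */

struct cmd_hdr {                          /* illustrative header layout    */
        uint8_t handle;
        uint8_t operation;
        uint8_t frag_pref;
        uint8_t length;
};

static void send_cmd(uint16_t opcode, const void *param, size_t plen)
{
        /* Stand-in for hci_req_add(); a real implementation would queue
         * the command towards the controller. */
        (void)opcode; (void)param; (void)plen;
}

static void set_variable_data(uint16_t opcode, const uint8_t *data, uint8_t len)
{
        struct {
                struct cmd_hdr cp;
                uint8_t data[MAX_DATA];
        } pdu;

        if (len > MAX_DATA)               /* clamp to the buffer size */
                len = MAX_DATA;

        memset(&pdu, 0, sizeof(pdu));
        memcpy(pdu.data, data, len);
        pdu.cp.length = len;

        /* Send the header plus only the bytes actually used. */
        send_cmd(opcode, &pdu.cp, sizeof(pdu.cp) + len);
}
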
@@ -2046,8 +2072,6 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
* current RPA has expired then generate a new one.
*/
if (use_rpa) {
- int to;
-
/* If Controller supports LL Privacy use own address type is
* 0x03
*/
@@ -2058,14 +2082,10 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
*own_addr_type = ADDR_LE_DEV_RANDOM;
if (adv_instance) {
- if (!adv_instance->rpa_expired &&
- !bacmp(&adv_instance->random_addr, &hdev->rpa))
+ if (adv_rpa_valid(adv_instance))
return 0;
-
- adv_instance->rpa_expired = false;
} else {
- if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
- !bacmp(&hdev->random_addr, &hdev->rpa))
+ if (rpa_valid(hdev))
return 0;
}
@@ -2077,14 +2097,6 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
bacpy(rand_addr, &hdev->rpa);
- to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
- if (adv_instance)
- queue_delayed_work(hdev->workqueue,
- &adv_instance->rpa_expired_cb, to);
- else
- queue_delayed_work(hdev->workqueue,
- &hdev->rpa_expired, to);
-
return 0;
}
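
The open-coded RPA expiry checks in hci_get_random_address() are folded into adv_rpa_valid() and rpa_valid(), which are not defined in this hunk, and the delayed work that re-armed the RPA timeout is no longer queued from this path. Judging only from the code being removed, rpa_valid() presumably reduces to something like the sketch below; the helper's actual definition may differ.

/* Illustrative reconstruction from the removed checks, not the real
 * helper. */
static bool rpa_valid(struct hci_dev *hdev)
{
        /* The RPA is still usable if it has not been flagged as expired
         * and is already programmed as the current random address. */
        if (hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED))
                return false;

        return !bacmp(&hdev->random_addr, &hdev->rpa);
}
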
@@ -2127,6 +2139,30 @@ void __hci_req_clear_ext_adv_sets(struct hci_request *req)
hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL);
}
+static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
+{
+ struct hci_dev *hdev = req->hdev;
+
+ /* If we're advertising or initiating an LE connection we can't
+ * go ahead and change the random address at this time. This is
+ * because the eventual initiator address used for the
+ * subsequently created connection will be undefined (some
+ * controllers use the new address and others the one we had
+ * when the operation started).
+ *
+ * In this kind of scenario skip the update and let the random
+ * address be updated at the next cycle.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+ hci_lookup_le_connect(hdev)) {
+ bt_dev_dbg(hdev, "Deferring random address update");
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+ return;
+ }
+
+ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
+}
+
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
struct hci_cp_le_set_ext_adv_params cp;
@@ -2229,6 +2265,13 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
} else {
if (!bacmp(&random_addr, &hdev->random_addr))
return 0;
+ /* Instance 0x00 doesn't have an adv_info, instead it
+ * uses hdev->random_addr to track its address so
+ * whenever it needs to be updated this also set the
+ * random address since hdev->random_addr is shared with
+ * scan state machine.
+ */
+ set_random_addr(req, &random_addr);
}
memset(&cp, 0, sizeof(cp));
@@ -2486,30 +2529,6 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
false);
}
-static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* If we're advertising or initiating an LE connection we can't
- * go ahead and change the random address at this time. This is
- * because the eventual initiator address used for the
- * subsequently created connection will be undefined (some
- * controllers use the new address and others the one we had
- * when the operation started).
- *
- * In this kind of scenario skip the update and let the random
- * address be updated at the next cycle.
- */
- if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
- hci_lookup_le_connect(hdev)) {
- bt_dev_dbg(hdev, "Deferring random address update");
- hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
- return;
- }
-
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
-}
-
int hci_update_random_address(struct hci_request *req, bool require_privacy,
bool use_rpa, u8 *own_addr_type)
{
@@ -2521,8 +2540,6 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
* the current RPA in use, then generate a new one.
*/
if (use_rpa) {
- int to;
-
/* If Controller supports LL Privacy use own address type is
* 0x03
*/
@@ -2532,8 +2549,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
else
*own_addr_type = ADDR_LE_DEV_RANDOM;
- if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
- !bacmp(&hdev->random_addr, &hdev->rpa))
+ if (rpa_valid(hdev))
return 0;
err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
@@ -2544,9 +2560,6 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
set_random_addr(req, &hdev->rpa);
- to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
- queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
-
return 0;
}
@@ -2605,11 +2618,11 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
return 0;
}
-static bool disconnected_whitelist_entries(struct hci_dev *hdev)
+static bool disconnected_accept_list_entries(struct hci_dev *hdev)
{
struct bdaddr_list *b;
- list_for_each_entry(b, &hdev->whitelist, list) {
+ list_for_each_entry(b, &hdev->accept_list, list) {
struct hci_conn *conn;
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
@@ -2641,7 +2654,7 @@ void __hci_req_update_scan(struct hci_request *req)
return;
if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
- disconnected_whitelist_entries(hdev))
+ disconnected_accept_list_entries(hdev))
scan = SCAN_PAGE;
else
scan = SCAN_DISABLED;
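
After the rename, __hci_req_update_scan() still keeps BR/EDR page scan on whenever the device is connectable or any accept-list entry is currently disconnected and might reconnect. A minimal model of that choice, using the standard Write Scan Enable values (0x02 = page scan only, 0x00 = scans disabled):

#include <stdbool.h>
#include <stdint.h>

#define SCAN_DISABLED 0x00
#define SCAN_PAGE     0x02

static uint8_t model_update_scan(bool connectable, bool disconnected_entry)
{
        return (connectable || disconnected_entry) ? SCAN_PAGE : SCAN_DISABLED;
}
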
@@ -3133,8 +3146,10 @@ static int active_scan(struct hci_request *req, unsigned long opt)
uint16_t interval = opt;
struct hci_dev *hdev = req->hdev;
u8 own_addr_type;
- /* White list is not used for discovery */
+ /* Accept list is not used for discovery */
u8 filter_policy = 0x00;
+ /* Default is to enable duplicates filter */
+ u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
/* Discovery doesn't require controller address resolution */
bool addr_resolv = false;
int err;
@@ -3159,9 +3174,26 @@ static int active_scan(struct hci_request *req, unsigned long opt)
if (err < 0)
own_addr_type = ADDR_LE_DEV_PUBLIC;
+ if (hci_is_adv_monitoring(hdev)) {
+ /* Duplicate filter should be disabled when some advertisement
+ * monitor is activated, otherwise AdvMon can only receive one
+ * advertisement for one peer(*) during active scanning, and
+ * might report loss to these peers.
+ *
+ * Note that different controllers have different meanings of
+ * |duplicate|. Some of them consider packets with the same
+ * address as duplicate, and others consider packets with the
+ * same address and the same RSSI as duplicate. Although in the
+ * latter case we don't need to disable duplicate filter, but
+ * it is common to have active scanning for a short period of
+ * time, the power impact should be neglectable.
+ */
+ filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
+ }
+
hci_req_start_scan(req, LE_SCAN_ACTIVE, interval,
hdev->le_scan_window_discovery, own_addr_type,
- filter_policy, addr_resolv);
+ filter_policy, filter_dup, addr_resolv);
return 0;
}