/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"
/* Registered management channels; walked and mutated under the mutex below */
static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

/* Allocator for per-socket monitor tracing cookies (see hci_sock_gen_cookie) */
static DEFINE_IDA(sock_cookie_ida);

/* Count of monitor sockets; non-zero enables hci_send_to_monitor delivery */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */
/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;		/* must stay first: hci_pi() casts sock */
	struct hci_dev *hdev;		/* bound device, NULL while unbound */
	struct hci_filter filter;	/* per-socket RAW channel packet filter */
	__u32 cmsg_mask;		/* requested cmsg items (used outside this chunk) */
	unsigned short channel;		/* HCI_CHANNEL_* the socket is bound to */
	unsigned long flags;		/* HCI_SOCK_* bits, see hci_sock_set_flag() */
	__u32 cookie;			/* monitor tracing cookie, 0 = not assigned */
	char comm[TASK_COMM_LEN];	/* task name captured when cookie was made */
};
/* Return the device bound to @sk, or an error pointer when the socket is
 * not bound (-EBADFD) or the device is going away (-EPIPE).
 */
static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}
/* Atomically set HCI_SOCK_* flag bit @nr on socket @sk */
void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}
/* Atomically clear HCI_SOCK_* flag bit @nr on socket @sk */
void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}
/* Test HCI_SOCK_* flag bit @nr on socket @sk; returns non-zero when set */
int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}
/* Return the HCI_CHANNEL_* the socket is bound to */
unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}
/* Return the socket's monitor tracing cookie (0 when none assigned) */
u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}
Marcel Holtmanndf1cb872016-08-30 05:00:34 +020098static bool hci_sock_gen_cookie(struct sock *sk)
99{
100 int id = hci_pi(sk)->cookie;
101
102 if (!id) {
103 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
104 if (id < 0)
105 id = 0xffffffff;
106
107 hci_pi(sk)->cookie = id;
108 get_task_comm(hci_pi(sk)->comm, current);
109 return true;
110 }
111
112 return false;
113}
114
115static void hci_sock_free_cookie(struct sock *sk)
116{
117 int id = hci_pi(sk)->cookie;
118
119 if (id) {
120 hci_pi(sk)->cookie = 0xffffffff;
121 ida_simple_remove(&sock_cookie_ida, id);
122 }
123}
124
Jiri Slaby93919762015-02-19 15:20:43 +0100125static inline int hci_test_bit(int nr, const void *addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126{
Jiri Slaby93919762015-02-19 15:20:43 +0100127 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128}
129
/* Security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;	/* allowed HCI packet types (bit per type) */
	__u32 event_mask[2];	/* allowed HCI events, 64-bit bitmap */
	/* allowed opcodes per OGF, 128-bit OCF bitmap each */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
/* Whitelist of packet types, events and commands.
 * NOTE(review): consumers are outside this chunk; presumably applied to
 * unprivileged raw sockets — confirm against hci_sock_sendmsg/setsockopt.
 */
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
/* All open HCI sockets; traversed under the embedded rwlock */
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
/* Return true when @skb must NOT be delivered to @sk according to the
 * socket's filter: packet type mask, event mask and, for command
 * complete/status events, an optional opcode match.
 */
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	/* First byte of an event packet is the event code */
	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	/* The opcode sits at a different offset in Command Complete
	 * versus Command Status event payloads.
	 */
	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			/* RAW sockets see commands, events and data, subject
			 * to the per-socket filter.
			 */
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* USER channel sees only incoming traffic */
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		/* Create the shared copy lazily, only once a receiving
		 * socket is actually found.
		 */
		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}
Johan Hedberg71290692015-02-20 13:26:23 +0200264/* Send frame to sockets with specific channel */
265void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700266 int flag, struct sock *skip_sk)
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100267{
268 struct sock *sk;
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100269
Johan Hedberg71290692015-02-20 13:26:23 +0200270 BT_DBG("channel %u len %d", channel, skb->len);
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100271
272 read_lock(&hci_sk_list.lock);
273
Sasha Levinb67bfe02013-02-27 17:06:00 -0800274 sk_for_each(sk, &hci_sk_list.head) {
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100275 struct sk_buff *nskb;
276
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700277 /* Ignore socket without the flag set */
Marcel Holtmannc85be542015-03-14 19:28:00 -0700278 if (!hci_sock_test_flag(sk, flag))
Marcel Holtmannc08b1a12015-03-14 19:27:59 -0700279 continue;
280
Marcel Holtmann470fe1b2012-02-20 14:50:30 +0100281 /* Skip the original socket */
282 if (sk == skip_sk)
283 continue;
284
285 if (sk->sk_state != BT_BOUND)
286 continue;
287
Johan Hedberg71290692015-02-20 13:26:23 +0200288 if (hci_pi(sk)->channel != channel)
Marcel Holtmannd7f72f62015-01-11 19:33:32 -0800289 continue;
290
291 nskb = skb_clone(skb, GFP_ATOMIC);
292 if (!nskb)
293 continue;
294
295 if (sock_queue_rcv_skb(sk, nskb))
296 kfree_skb(nskb);
297 }
298
299 read_unlock(&hci_sk_list.lock);
300}
301
/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	/* Fast path: nothing to do when no monitor socket is open */
	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	/* Map packet type (and direction for data) to a monitor opcode */
	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
/* Broadcast a management (control channel) event to monitor sockets.
 * One message is built per control socket carrying @flag, tagged with
 * that socket's cookie; payload layout is cookie(4) + event(2) + data.
 */
void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		/* Preserve the caller-supplied timestamp of the event */
		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				    HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}
/* Build the monitor message describing a device lifecycle @event for @hdev.
 * Returns a freshly allocated skb (caller frees), or NULL for unsupported
 * events, allocation failure, or a setup event with no known manufacturer.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		/* Skip until the manufacturer is known */
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
/* Build the HCI_MON_CTRL_OPEN trace message announcing socket @sk.
 * Payload: cookie(4) + format(2) + version(3) + flags(4) +
 * comm-len(1) + comm(TASK_COMM_LEN). Returns NULL when the socket has
 * no cookie or is on an unsupported channel.
 */
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	/* Format code identifies the channel type to the monitor */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
/* Build the HCI_MON_CTRL_CLOSE trace message for socket @sk.
 * Payload is just the 4-byte cookie; returns NULL when the socket has no
 * cookie or sits on a channel that never announced an open.
 */
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	/* Must mirror the channels handled by create_monitor_ctrl_open() */
	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
/* Build the HCI_MON_CTRL_COMMAND trace message for a management command
 * received on @sk. Payload: cookie(4) + opcode(2) + command parameters.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
/* Queue a printf-formatted HCI_MON_SYSTEM_NOTE message on monitor socket
 * @sk. The format is rendered twice: once with vsnprintf(NULL, 0, ...) to
 * size the buffer, then into the skb itself, followed by a NUL terminator.
 */
static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
/* Replay the current device state to a newly opened monitor socket @sk:
 * for every registered controller emit NEW_INDEX, then OPEN_INDEX if it is
 * running, then INDEX_INFO for up or setup-complete devices.
 */
static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		/* Remaining events only apply to opened devices */
		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}
Marcel Holtmann249fa162016-08-27 20:23:40 +0200686static void send_monitor_control_replay(struct sock *mon_sk)
687{
688 struct sock *sk;
689
690 read_lock(&hci_sk_list.lock);
691
692 sk_for_each(sk, &hci_sk_list.head) {
693 struct sk_buff *skb;
694
Marcel Holtmann249fa162016-08-27 20:23:40 +0200695 skb = create_monitor_ctrl_open(sk);
696 if (!skb)
697 continue;
698
699 if (sock_queue_rcv_skb(mon_sk, skb))
700 kfree_skb(skb);
701 }
702
703 read_unlock(&hci_sk_list.lock);
704}
705
/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	/* Synthetic HCI event header wrapping the stack-internal payload */
	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	/* Mark as an incoming event frame before fan-out to sockets */
	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
/* Notify interested parties of a device lifecycle @event on @hdev:
 * monitor sockets get a monitor message, regular sockets get a
 * stack-internal event, and on unregister any socket still bound to the
 * device is woken with EPIPE.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}
Johan Hedberg801c1e82015-03-06 21:08:50 +0200773static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
774{
775 struct hci_mgmt_chan *c;
776
777 list_for_each_entry(c, &mgmt_chan_list, list) {
778 if (c->channel == channel)
779 return c;
780 }
781
782 return NULL;
783}
784
/* Locked wrapper around __hci_mgmt_chan_find() */
static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}
/* Register a management channel handler.
 * Returns 0 on success, -EINVAL for channel numbers below
 * HCI_CHANNEL_CONTROL, -EALREADY when the channel is already taken.
 */
int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
/* Remove a previously registered management channel handler */
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
/* Release an HCI socket: trace the close to monitor sockets, drop the
 * tracing cookie, unlink from the global socket list, release any bound
 * device (shutting it down for user-channel sockets) and free the sock.
 */
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
Antti Julkub2a66aa2011-06-15 12:01:14 +0300884static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200885{
886 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300887 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200888
889 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
890 return -EFAULT;
891
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300892 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300893
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300894 err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300895
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300896 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300897
898 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200899}
900
Antti Julkub2a66aa2011-06-15 12:01:14 +0300901static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
Johan Hedbergf0358562010-05-18 13:20:32 +0200902{
903 bdaddr_t bdaddr;
Antti Julku5e762442011-08-25 16:48:02 +0300904 int err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200905
906 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
907 return -EFAULT;
908
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300909 hci_dev_lock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300910
Johan Hedbergdcc36c12014-07-09 12:59:13 +0300911 err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
Antti Julku5e762442011-08-25 16:48:02 +0300912
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300913 hci_dev_unlock(hdev);
Antti Julku5e762442011-08-25 16:48:02 +0300914
915 return err;
Johan Hedbergf0358562010-05-18 13:20:32 +0200916}
917
/* Ioctls that require bound socket.
 *
 * Called from hci_sock_ioctl() with the socket lock held. Resolves the
 * bound controller via hci_hdev_from_sock() and dispatches the device
 * specific commands. Privileged operations are gated on CAP_NET_ADMIN.
 *
 * Returns a negative errno, the handler's result, or -ENOIOCTLCMD for
 * commands not handled here.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	/* A controller under exclusive user-channel access cannot be
	 * driven through raw-socket ioctls at the same time.
	 */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	/* These legacy ioctls only make sense for BR/EDR controllers. */
	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		/* Raw mode is no longer supported; still require the
		 * capability before revealing that.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}
961
/* Main ioctl entry point for HCI sockets.
 *
 * Only HCI_CHANNEL_RAW sockets accept ioctls. Device-global commands
 * (dev list/info, up/down, ...) are dispatched without the socket lock
 * held since they do not touch socket state; everything else falls
 * through to hci_sock_bound_ioctl() under the lock.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will be only ever generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	/* Drop the socket lock: the commands below operate on the device
	 * table, not on this socket, and may sleep for a long time
	 * (e.g. HCIDEVUP, HCIINQUIRY).
	 */
	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	/* Everything else needs the socket's bound device; retake the
	 * lock for hci_sock_bound_ioctl() and release it at "done".
	 */
	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}
1055
/* Bind an HCI socket to a channel (and, for RAW/USER, to a controller).
 *
 * The channel selects the socket's role: raw HCI access, exclusive user
 * channel, monitor tracing, logging injection, or one of the dynamically
 * registered management channels. Privilege requirements differ per
 * channel. Open/close notifications are emitted to the monitor channel
 * so tracing tools see socket lifecycle events.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	/* Copy at most sizeof(haddr) bytes; shorter sockaddrs are padded
	 * with zeros by the memset.
	 */
	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* HCI_DEV_NONE means "all devices"; otherwise pin the
		 * requested controller and mark it promiscuous.
		 */
		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * then there has been already an ioctl issued against
			 * an unbound socket and with that triggered an open
			 * notification. Send a close notification first to
			 * allow the state transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		/* The user channel always needs a concrete device. */
		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		/* Exclusive access can only be taken while the controller
		 * is not in use: not initializing, not in setup/config and
		 * not already up (unless still in the auto-off grace
		 * period).
		 */
		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		/* Atomically claim exclusive access; fails if another
		 * user channel already holds it.
		 */
		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		/* Hide the index from mgmt while userspace drives the
		 * controller directly; restored on release or error.
		 */
		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Replay current state (kernel version, subsystem version,
		 * existing controllers and control sockets) so a newly
		 * attached monitor starts with a complete picture.
		 */
		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		/* Anything else must be a registered management channel. */
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Example for such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
1351
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001352static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1353 int *addr_len, int peer)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354{
Marcel Holtmann8528d3f2015-11-08 07:47:11 +01001355 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 struct sock *sk = sock->sk;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001357 struct hci_dev *hdev;
1358 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
1360 BT_DBG("sock %p sk %p", sock, sk);
1361
Marcel Holtmann06f43cb2013-08-26 00:06:30 -07001362 if (peer)
1363 return -EOPNOTSUPP;
1364
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 lock_sock(sk);
1366
Tetsuo Handa0782c8c2021-08-04 19:26:56 +09001367 hdev = hci_hdev_from_sock(sk);
1368 if (IS_ERR(hdev)) {
1369 err = PTR_ERR(hdev);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001370 goto done;
1371 }
1372
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 *addr_len = sizeof(*haddr);
1374 haddr->hci_family = AF_BLUETOOTH;
Marcel Holtmann7b005bd2006-02-13 11:40:03 +01001375 haddr->hci_dev = hdev->id;
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001376 haddr->hci_channel= hci_pi(sk)->channel;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001377
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001378done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379 release_sock(sk);
Marcel Holtmann9d4b68b2013-08-26 00:20:37 -07001380 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381}
1382
/* Attach the ancillary data (control messages) requested via the
 * socket's cmsg_mask to a received message: packet direction and/or
 * receive timestamp. For 32-bit compat callers the timestamp is
 * converted to a compat_timeval.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		/* 32-bit tasks using 32-bit time need the compat layout;
		 * COMPAT_USE_64BIT_TIME covers x32-style ABIs that keep
		 * the native 64-bit timeval.
		 */
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001419
/* Receive one queued packet from an HCI socket.
 *
 * Per datagram semantics, a packet larger than the caller's buffer is
 * truncated and MSG_TRUNC is set; with MSG_TRUNC the full packet length
 * is returned instead of the copied length. Ancillary data depends on
 * the channel: raw sockets get direction/timestamp cmsgs, user/monitor
 * and mgmt sockets get a plain receive timestamp.
 */
static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* The logging channel is write-only. */
	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	/* Remember the full length for MSG_TRUNC reporting below. */
	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
1475
/* Parse and dispatch one management command received on a mgmt channel.
 *
 * Validates the mgmt_hdr (opcode/index/len), mirrors control-channel
 * commands to the monitor, enforces the handler's trust, device-state
 * and length requirements, then calls the handler. Returns the consumed
 * length (msglen) on success or a negative errno; command-level failures
 * are additionally reported to the sender via mgmt_cmd_status().
 */
static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The declared payload length must match what was received. */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only invoke handlers explicitly marked
	 * as safe for untrusted use.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers in setup/config or under exclusive user
		 * channel access are not addressable via mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Handlers either require a device index or forbid one. */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands need at least data_len bytes; fixed
	 * ones need exactly data_len.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
1599
/* Forward a userspace logging frame (HCI_CHANNEL_LOGGING) to the
 * monitor channel after strict validation of its layout.
 *
 * Returns the frame length on success or a negative errno on malformed
 * input. The skb is always freed here: hci_send_to_channel() delivers
 * copies, so the success path deliberately falls through to "drop".
 */
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter are invalid packets.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	/* Declared payload length must match the received frame. */
	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		/* Only opcode 0x0000 is accepted from userspace. */
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	/* Validate that a referenced controller index actually exists;
	 * MGMT_INDEX_NONE frames are not tied to a device.
	 */
	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	/* Rewrite the opcode so monitors see a kernel-tagged frame. */
	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

	/* Intentional fallthrough: the skb must be freed on success too. */
drop:
	kfree_skb(skb);
	return err;
}
1683
/* Transmit path for HCI sockets.
 *
 * Dispatches on the channel the socket was bound to: raw and user
 * channel frames are copied from user space and queued towards the
 * controller, monitor sockets are read-only, logging frames are handed
 * to hci_logging_frame() and management channels to their registered
 * command handler.
 *
 * Returns the number of bytes consumed (len) on success or a negative
 * errno.
 */
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* Reject any flag outside the small supported set. */
	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	/* Minimum of 4 bytes guarantees the packet type byte and the
	 * 16-bit command opcode read below are in bounds (presumably the
	 * parameter-length byte as well — the HCI command header is
	 * type + opcode + plen).
	 */
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		/* Handled by the common path below. */
		break;
	case HCI_CHANNEL_MONITOR:
		/* Monitor sockets only receive; sending is not allowed. */
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		/* Any other channel number is a dynamically registered
		 * management channel; look it up under the list lock.
		 */
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	/* First byte carries the H:4 packet type indicator; strip it off
	 * the payload and stash it in the skb control block.
	 */
	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		/* skb ownership passes to the raw queue; the tx worker
		 * will hand it to the driver.
		 */
		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		/* Raw sockets without CAP_NET_RAW may only send commands
		 * permitted by the static security filter.
		 */
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			/* Vendor-specific commands (OGF 0x3f) bypass the
			 * command queue and go out via the raw queue.
			 */
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		/* Data packets on a raw socket require CAP_NET_RAW. */
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
1823
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001824static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1825 char __user *optval, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826{
1827 struct hci_ufilter uf = { .opcode = 0 };
1828 struct sock *sk = sock->sk;
1829 int err = 0, opt = 0;
1830
1831 BT_DBG("sk %p, opt %d", sk, optname);
1832
Marcel Holtmann47b0f572016-08-27 20:23:37 +02001833 if (level != SOL_HCI)
1834 return -ENOPROTOOPT;
1835
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836 lock_sock(sk);
1837
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001838 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001839 err = -EBADFD;
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001840 goto done;
1841 }
1842
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843 switch (optname) {
1844 case HCI_DATA_DIR:
1845 if (get_user(opt, (int __user *)optval)) {
1846 err = -EFAULT;
1847 break;
1848 }
1849
1850 if (opt)
1851 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1852 else
1853 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1854 break;
1855
1856 case HCI_TIME_STAMP:
1857 if (get_user(opt, (int __user *)optval)) {
1858 err = -EFAULT;
1859 break;
1860 }
1861
1862 if (opt)
1863 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1864 else
1865 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1866 break;
1867
1868 case HCI_FILTER:
Marcel Holtmann0878b662007-05-05 00:35:59 +02001869 {
1870 struct hci_filter *f = &hci_pi(sk)->filter;
1871
1872 uf.type_mask = f->type_mask;
1873 uf.opcode = f->opcode;
1874 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1875 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1876 }
1877
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 len = min_t(unsigned int, len, sizeof(uf));
1879 if (copy_from_user(&uf, optval, len)) {
1880 err = -EFAULT;
1881 break;
1882 }
1883
1884 if (!capable(CAP_NET_RAW)) {
1885 uf.type_mask &= hci_sec_filter.type_mask;
1886 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1887 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1888 }
1889
1890 {
1891 struct hci_filter *f = &hci_pi(sk)->filter;
1892
1893 f->type_mask = uf.type_mask;
1894 f->opcode = uf.opcode;
1895 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1896 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1897 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001898 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899
1900 default:
1901 err = -ENOPROTOOPT;
1902 break;
1903 }
1904
Marcel Holtmann2f39cdb2012-02-20 14:50:32 +01001905done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 release_sock(sk);
1907 return err;
1908}
1909
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001910static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1911 char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912{
1913 struct hci_ufilter uf;
1914 struct sock *sk = sock->sk;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001915 int len, opt, err = 0;
1916
1917 BT_DBG("sk %p, opt %d", sk, optname);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918
Marcel Holtmann47b0f572016-08-27 20:23:37 +02001919 if (level != SOL_HCI)
1920 return -ENOPROTOOPT;
1921
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 if (get_user(len, optlen))
1923 return -EFAULT;
1924
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001925 lock_sock(sk);
1926
1927 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
Marcel Holtmannc2371e82013-08-26 09:29:39 -07001928 err = -EBADFD;
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001929 goto done;
1930 }
1931
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 switch (optname) {
1933 case HCI_DATA_DIR:
1934 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1935 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001936 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937 opt = 0;
1938
1939 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001940 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 break;
1942
1943 case HCI_TIME_STAMP:
1944 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1945 opt = 1;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001946 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 opt = 0;
1948
1949 if (put_user(opt, optval))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001950 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 break;
1952
1953 case HCI_FILTER:
1954 {
1955 struct hci_filter *f = &hci_pi(sk)->filter;
1956
Mathias Krausee15ca9a2012-08-15 11:31:46 +00001957 memset(&uf, 0, sizeof(uf));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 uf.type_mask = f->type_mask;
1959 uf.opcode = f->opcode;
1960 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1961 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1962 }
1963
1964 len = min_t(unsigned int, len, sizeof(uf));
1965 if (copy_to_user(optval, &uf, len))
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001966 err = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 break;
1968
1969 default:
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001970 err = -ENOPROTOOPT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 break;
1972 }
1973
Marcel Holtmanncedc5462012-02-20 14:50:33 +01001974done:
1975 release_sock(sk);
1976 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977}
1978
Nguyen Dinh Phic214be92021-10-08 03:04:24 +08001979static void hci_sock_destruct(struct sock *sk)
1980{
1981 skb_queue_purge(&sk->sk_receive_queue);
1982 skb_queue_purge(&sk->sk_write_queue);
1983}
1984
/* proto_ops for HCI sockets.  Connection-oriented operations are wired
 * to the sock_no_* stubs since HCI sockets are raw datagram-style
 * sockets.
 */
static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};
2004
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * per-socket hci_pinfo state accessed via hci_pi().
 */
static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};
2010
/* Create a new HCI socket for the PF_BLUETOOTH family.
 *
 * Only SOCK_RAW is supported.  The new sock starts in BT_OPEN state
 * with SOCK_ZAPPED cleared and is linked into the global hci_sk_list;
 * channel selection happens later through bind().
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* Generic initialization must run before the fields below are
	 * customized.
	 */
	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;
	/* Ensure queued skbs are freed when the sock goes away. */
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
2040
/* Family registration hook; create() is invoked for every new
 * BTPROTO_HCI socket.
 */
static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};
2046
/* Register the HCI socket layer: protocol, BTPROTO_HCI family and the
 * procfs entry.  Registration is unwound in reverse order on failure.
 *
 * Returns 0 on success or a negative errno.
 */
int __init hci_sock_init(void)
{
	int err;

	/* sockaddr_hci must fit inside the generic sockaddr. */
	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		/* bt_sock_register() succeeded, so undo it here; the
		 * shared error label only unregisters the proto.
		 */
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}
2078
/* Tear down the HCI socket layer in the reverse order of
 * hci_sock_init().
 */
void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}