[hurd] 04/05: New upstream snapshot
From: Samuel Thibault
Subject: [hurd] 04/05: New upstream snapshot
Date: Tue, 22 Sep 2015 21:51:56 +0000
This is an automated email from the git hooks/post-receive script.
sthibault pushed a commit to branch master
in repository hurd.
commit 9ce028960237e6e69b63a3e6ac2f4fdff6d05047
Author: Samuel Thibault <address@hidden>
Date: Tue Sep 22 18:58:38 2015 +0000
New upstream snapshot
---
.../contrib/include/linux/compiler-gcc5.h | 25 +
libdde-linux26/contrib/include/linux/mdio.h | 356 ++++++++++
libdde-linux26/contrib/include/net/ethoc.h | 22 +
libdde-linux26/contrib/kernel/rcuclassic.c | 788 +++++++++++++++++++++
libdde-linux26/contrib/lib/devres.c | 351 +++++++++
5 files changed, 1542 insertions(+)
diff --git a/libdde-linux26/contrib/include/linux/compiler-gcc5.h b/libdde-linux26/contrib/include/linux/compiler-gcc5.h
new file mode 100644
index 0000000..34c5861
--- /dev/null
+++ b/libdde-linux26/contrib/include/linux/compiler-gcc5.h
@@ -0,0 +1,25 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include
<linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+#define __always_inline inline __attribute__((always_inline))
+
+/*
+ * A trick to suppress uninitialized variable warning without generating any
+ * code
+ */
+#define uninitialized_var(x) x = x
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
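For context, and not part of the patch above: a minimal sketch of how uninitialized_var() and __cold are typically used in driver code. The function and variable names below are hypothetical.

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* __cold: gcc assumes paths reaching this function are unlikely,
 * so callers need no explicit unlikely() around the error branch. */
static void __cold report_failure(int err)
{
	printk(KERN_ERR "example: operation failed: %d\n", err);
}

/* Hypothetical helper: always sets *out when it returns 0. */
static int example_compute(int key, int *out)
{
	if (key < 0)
		return -EINVAL;
	*out = key * 2;
	return 0;
}

static int example(int key)
{
	/* Silences a false "may be used uninitialized" warning
	 * without generating any extra code. */
	int uninitialized_var(val);
	int err = example_compute(key, &val);

	if (err) {
		report_failure(err);
		return err;
	}
	return val;
}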
diff --git a/libdde-linux26/contrib/include/linux/mdio.h b/libdde-linux26/contrib/include/linux/mdio.h
new file mode 100644
index 0000000..c779b49
--- /dev/null
+++ b/libdde-linux26/contrib/include/linux/mdio.h
@@ -0,0 +1,356 @@
+/*
+ * linux/mdio.h: definitions for MDIO (clause 45) transceivers
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef __LINUX_MDIO_H__
+#define __LINUX_MDIO_H__
+
+#include <linux/mii.h>
+
+/* MDIO Manageable Devices (MMDs). */
+#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment/
+ * Physical Medium Dependent */
+#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */
+#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */
+#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */
+#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
+#define MDIO_MMD_TC 6 /* Transmission Convergence */
+#define MDIO_MMD_AN 7 /* Auto-Negotiation */
+#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
+#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
+#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
+
+/* Generic MDIO registers. */
+#define MDIO_CTRL1 MII_BMCR
+#define MDIO_STAT1 MII_BMSR
+#define MDIO_DEVID1 MII_PHYSID1
+#define MDIO_DEVID2 MII_PHYSID2
+#define MDIO_SPEED 4 /* Speed ability */
+#define MDIO_DEVS1 5 /* Devices in package */
+#define MDIO_DEVS2 6
+#define MDIO_CTRL2 7 /* 10G control 2 */
+#define MDIO_STAT2 8 /* 10G status 2 */
+#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */
+#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */
+#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */
+#define MDIO_PKGID1 14 /* Package identifier */
+#define MDIO_PKGID2 15
+#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
+#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
+#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
+
+/* Media-dependent registers. */
+#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
+#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
+ * Lanes B-D are numbered 134-136. */
+#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
+#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
+#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
+#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+
+/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
+#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
+#define MDIO_PMA_LASI_TXCTRL 0x9001 /* TX_ALARM control */
+#define MDIO_PMA_LASI_CTRL 0x9002 /* LASI control */
+#define MDIO_PMA_LASI_RXSTAT 0x9003 /* RX_ALARM status */
+#define MDIO_PMA_LASI_TXSTAT 0x9004 /* TX_ALARM status */
+#define MDIO_PMA_LASI_STAT 0x9005 /* LASI status */
+
+/* Control register 1. */
+/* Enable extended speed selection */
+#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100)
+/* All speed selection bits */
+#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c)
+#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX
+#define MDIO_CTRL1_LPOWER BMCR_PDOWN
+#define MDIO_CTRL1_RESET BMCR_RESET
+#define MDIO_PMA_CTRL1_LOOPBACK 0x0001
+#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000
+#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100
+#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
+#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
+#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
+
+/* 10 Gb/s */
+#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
+/* 10PASS-TS/2BASE-TL */
+#define MDIO_CTRL1_SPEED10P2B (MDIO_CTRL1_SPEEDSELEXT | 0x04)
+
+/* Status register 1. */
+#define MDIO_STAT1_LPOWERABLE 0x0002 /* Low-power ability */
+#define MDIO_STAT1_LSTATUS BMSR_LSTATUS
+#define MDIO_STAT1_FAULT 0x0080 /* Fault */
+#define MDIO_AN_STAT1_LPABLE 0x0001 /* Link partner AN ability */
+#define MDIO_AN_STAT1_ABLE BMSR_ANEGCAPABLE
+#define MDIO_AN_STAT1_RFAULT BMSR_RFAULT
+#define MDIO_AN_STAT1_COMPLETE BMSR_ANEGCOMPLETE
+#define MDIO_AN_STAT1_PAGE 0x0040 /* Page received */
+#define MDIO_AN_STAT1_XNP 0x0080 /* Extended next page status */
+
+/* Speed register. */
+#define MDIO_SPEED_10G 0x0001 /* 10G capable */
+#define MDIO_PMA_SPEED_2B 0x0002 /* 2BASE-TL capable */
+#define MDIO_PMA_SPEED_10P 0x0004 /* 10PASS-TS capable */
+#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */
+#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */
+#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */
+#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */
+
+/* Device present registers. */
+#define MDIO_DEVS_PRESENT(devad) (1 << (devad))
+#define MDIO_DEVS_PMAPMD MDIO_DEVS_PRESENT(MDIO_MMD_PMAPMD)
+#define MDIO_DEVS_WIS MDIO_DEVS_PRESENT(MDIO_MMD_WIS)
+#define MDIO_DEVS_PCS MDIO_DEVS_PRESENT(MDIO_MMD_PCS)
+#define MDIO_DEVS_PHYXS MDIO_DEVS_PRESENT(MDIO_MMD_PHYXS)
+#define MDIO_DEVS_DTEXS MDIO_DEVS_PRESENT(MDIO_MMD_DTEXS)
+#define MDIO_DEVS_TC MDIO_DEVS_PRESENT(MDIO_MMD_TC)
+#define MDIO_DEVS_AN MDIO_DEVS_PRESENT(MDIO_MMD_AN)
+#define MDIO_DEVS_C22EXT MDIO_DEVS_PRESENT(MDIO_MMD_C22EXT)
+
+/* Control register 2. */
+#define MDIO_PMA_CTRL2_TYPE 0x000f /* PMA/PMD type selection */
+#define MDIO_PMA_CTRL2_10GBCX4 0x0000 /* 10GBASE-CX4 type */
+#define MDIO_PMA_CTRL2_10GBEW 0x0001 /* 10GBASE-EW type */
+#define MDIO_PMA_CTRL2_10GBLW 0x0002 /* 10GBASE-LW type */
+#define MDIO_PMA_CTRL2_10GBSW 0x0003 /* 10GBASE-SW type */
+#define MDIO_PMA_CTRL2_10GBLX4 0x0004 /* 10GBASE-LX4 type */
+#define MDIO_PMA_CTRL2_10GBER 0x0005 /* 10GBASE-ER type */
+#define MDIO_PMA_CTRL2_10GBLR 0x0006 /* 10GBASE-LR type */
+#define MDIO_PMA_CTRL2_10GBSR 0x0007 /* 10GBASE-SR type */
+#define MDIO_PMA_CTRL2_10GBLRM 0x0008 /* 10GBASE-LRM type */
+#define MDIO_PMA_CTRL2_10GBT 0x0009 /* 10GBASE-T type */
+#define MDIO_PMA_CTRL2_10GBKX4 0x000a /* 10GBASE-KX4 type */
+#define MDIO_PMA_CTRL2_10GBKR 0x000b /* 10GBASE-KR type */
+#define MDIO_PMA_CTRL2_1000BT 0x000c /* 1000BASE-T type */
+#define MDIO_PMA_CTRL2_1000BKX 0x000d /* 1000BASE-KX type */
+#define MDIO_PMA_CTRL2_100BTX 0x000e /* 100BASE-TX type */
+#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */
+#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */
+#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */
+#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */
+#define MDIO_PCS_CTRL2_10GBW 0x0002 /* 10GBASE-W type */
+#define MDIO_PCS_CTRL2_10GBT 0x0003 /* 10GBASE-T type */
+
+/* Status register 2. */
+#define MDIO_STAT2_RXFAULT 0x0400 /* Receive fault */
+#define MDIO_STAT2_TXFAULT 0x0800 /* Transmit fault */
+#define MDIO_STAT2_DEVPRST 0xc000 /* Device present */
+#define MDIO_STAT2_DEVPRST_VAL 0x8000 /* Device present value */
+#define MDIO_PMA_STAT2_LBABLE 0x0001 /* PMA loopback ability */
+#define MDIO_PMA_STAT2_10GBEW 0x0002 /* 10GBASE-EW ability */
+#define MDIO_PMA_STAT2_10GBLW 0x0004 /* 10GBASE-LW ability */
+#define MDIO_PMA_STAT2_10GBSW 0x0008 /* 10GBASE-SW ability */
+#define MDIO_PMA_STAT2_10GBLX4 0x0010 /* 10GBASE-LX4 ability */
+#define MDIO_PMA_STAT2_10GBER 0x0020 /* 10GBASE-ER ability */
+#define MDIO_PMA_STAT2_10GBLR 0x0040 /* 10GBASE-LR ability */
+#define MDIO_PMA_STAT2_10GBSR 0x0080 /* 10GBASE-SR ability */
+#define MDIO_PMD_STAT2_TXDISAB 0x0100 /* PMD TX disable ability */
+#define MDIO_PMA_STAT2_EXTABLE 0x0200 /* Extended abilities */
+#define MDIO_PMA_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */
+#define MDIO_PMA_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */
+#define MDIO_PCS_STAT2_10GBR 0x0001 /* 10GBASE-R capable */
+#define MDIO_PCS_STAT2_10GBX 0x0002 /* 10GBASE-X capable */
+#define MDIO_PCS_STAT2_10GBW 0x0004 /* 10GBASE-W capable */
+#define MDIO_PCS_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */
+#define MDIO_PCS_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */
+
+/* Transmit disable register. */
+#define MDIO_PMD_TXDIS_GLOBAL 0x0001 /* Global PMD TX disable */
+#define MDIO_PMD_TXDIS_0 0x0002 /* PMD TX disable 0 */
+#define MDIO_PMD_TXDIS_1 0x0004 /* PMD TX disable 1 */
+#define MDIO_PMD_TXDIS_2 0x0008 /* PMD TX disable 2 */
+#define MDIO_PMD_TXDIS_3 0x0010 /* PMD TX disable 3 */
+
+/* Receive signal detect register. */
+#define MDIO_PMD_RXDET_GLOBAL 0x0001 /* Global PMD RX signal detect */
+#define MDIO_PMD_RXDET_0 0x0002 /* PMD RX signal detect 0 */
+#define MDIO_PMD_RXDET_1 0x0004 /* PMD RX signal detect 1 */
+#define MDIO_PMD_RXDET_2 0x0008 /* PMD RX signal detect 2 */
+#define MDIO_PMD_RXDET_3 0x0010 /* PMD RX signal detect 3 */
+
+/* Extended abilities register. */
+#define MDIO_PMA_EXTABLE_10GCX4 0x0001 /* 10GBASE-CX4 ability */
+#define MDIO_PMA_EXTABLE_10GBLRM 0x0002 /* 10GBASE-LRM ability */
+#define MDIO_PMA_EXTABLE_10GBT 0x0004 /* 10GBASE-T ability */
+#define MDIO_PMA_EXTABLE_10GBKX4 0x0008 /* 10GBASE-KX4 ability */
+#define MDIO_PMA_EXTABLE_10GBKR 0x0010 /* 10GBASE-KR ability */
+#define MDIO_PMA_EXTABLE_1000BT 0x0020 /* 1000BASE-T ability */
+#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */
+#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */
+#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */
+
+/* PHY XGXS lane state register. */
+#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001
+#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002
+#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004
+#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008
+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000
+
+/* PMA 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_SWAPPOL_ABNX 0x0001 /* Pair A/B uncrossed */
+#define MDIO_PMA_10GBT_SWAPPOL_CDNX 0x0002 /* Pair C/D uncrossed */
+#define MDIO_PMA_10GBT_SWAPPOL_AREV 0x0100 /* Pair A polarity reversed */
+#define MDIO_PMA_10GBT_SWAPPOL_BREV 0x0200 /* Pair B polarity reversed */
+#define MDIO_PMA_10GBT_SWAPPOL_CREV 0x0400 /* Pair C polarity reversed */
+#define MDIO_PMA_10GBT_SWAPPOL_DREV 0x0800 /* Pair D polarity reversed */
+
+/* PMA 10GBASE-T TX power register. */
+#define MDIO_PMA_10GBT_TXPWR_SHORT 0x0001 /* Short-reach mode */
+
+/* PMA 10GBASE-T SNR registers. */
+/* Value is SNR margin in dB, clamped to range [-127, 127], plus 0x8000. */
+#define MDIO_PMA_10GBT_SNR_BIAS 0x8000
+#define MDIO_PMA_10GBT_SNR_MAX 127
+
+/* PMA 10GBASE-R FEC ability register. */
+#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
+#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+
+/* PCS 10GBASE-R/-T status register 1. */
+#define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */
+
+/* PCS 10GBASE-R/-T status register 2. */
+#define MDIO_PCS_10GBRT_STAT2_ERR 0x00ff
+#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00
+
+/* AN 10GBASE-T control register. */
+#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */
+
+/* AN 10GBASE-T status register. */
+#define MDIO_AN_10GBT_STAT_LPTRR 0x0200 /* LP training reset req. */
+#define MDIO_AN_10GBT_STAT_LPLTABLE 0x0400 /* LP loop timing ability */
+#define MDIO_AN_10GBT_STAT_LP10G 0x0800 /* LP is 10GBT capable */
+#define MDIO_AN_10GBT_STAT_REMOK 0x1000 /* Remote OK */
+#define MDIO_AN_10GBT_STAT_LOCOK 0x2000 /* Local OK */
+#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
+#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
+
+/* LASI RX_ALARM control/status registers. */
+#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */
+#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */
+#define MDIO_PMA_LASI_RX_PMALFLT 0x0010 /* PMA/PMD RX local fault */
+#define MDIO_PMA_LASI_RX_OPTICPOWERFLT 0x0020 /* RX optical power fault */
+#define MDIO_PMA_LASI_RX_WISLFLT 0x0200 /* WIS local fault */
+
+/* LASI TX_ALARM control/status registers. */
+#define MDIO_PMA_LASI_TX_PHYXSLFLT 0x0001 /* PHY XS TX local fault */
+#define MDIO_PMA_LASI_TX_PCSLFLT 0x0008 /* PCS TX local fault */
+#define MDIO_PMA_LASI_TX_PMALFLT 0x0010 /* PMA/PMD TX local fault */
+#define MDIO_PMA_LASI_TX_LASERPOWERFLT 0x0080 /* Laser output power fault */
+#define MDIO_PMA_LASI_TX_LASERTEMPFLT 0x0100 /* Laser temperature fault */
+#define MDIO_PMA_LASI_TX_LASERBICURRFLT 0x0200 /* Laser bias current fault */
+
+/* LASI control/status registers. */
+#define MDIO_PMA_LASI_LSALARM 0x0001 /* LS_ALARM enable/status */
+#define MDIO_PMA_LASI_TXALARM 0x0002 /* TX_ALARM enable/status */
+#define MDIO_PMA_LASI_RXALARM 0x0004 /* RX_ALARM enable/status */
+
+/* Mapping between MDIO PRTAD/DEVAD and mii_ioctl_data::phy_id */
+
+#define MDIO_PHY_ID_C45 0x8000
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#define MDIO_PHY_ID_DEVAD 0x001f
+#define MDIO_PHY_ID_C45_MASK \
+ (MDIO_PHY_ID_C45 | MDIO_PHY_ID_PRTAD | MDIO_PHY_ID_DEVAD)
+
+static inline __u16 mdio_phy_id_c45(int prtad, int devad)
+{
+ return MDIO_PHY_ID_C45 | (prtad << 5) | devad;
+}
+
+static inline bool mdio_phy_id_is_c45(int phy_id)
+{
+ return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK);
+}
+
+static inline __u16 mdio_phy_id_prtad(int phy_id)
+{
+ return (phy_id & MDIO_PHY_ID_PRTAD) >> 5;
+}
+
+static inline __u16 mdio_phy_id_devad(int phy_id)
+{
+ return phy_id & MDIO_PHY_ID_DEVAD;
+}
+
+#define MDIO_SUPPORTS_C22 1
+#define MDIO_SUPPORTS_C45 2
+
+#ifdef __KERNEL__
+
+/**
+ * struct mdio_if_info - Ethernet controller MDIO interface
+ * @prtad: PRTAD of the PHY (%MDIO_PRTAD_NONE if not present/unknown)
+ * @mmds: Mask of MMDs expected to be present in the PHY. This must be
+ * non-zero unless @prtad = %MDIO_PRTAD_NONE.
+ * @mode_support: MDIO modes supported. If %MDIO_SUPPORTS_C22 is set then
+ * MII register access will be passed through with @devad =
+ * %MDIO_DEVAD_NONE. If %MDIO_EMULATE_C22 is set then access to
+ * commonly used clause 22 registers will be translated into
+ * clause 45 registers.
+ * @dev: Net device structure
+ * @mdio_read: Register read function; returns value or negative error code
+ * @mdio_write: Register write function; returns 0 or negative error code
+ */
+struct mdio_if_info {
+ int prtad;
+ u32 mmds;
+ unsigned mode_support;
+
+ struct net_device *dev;
+ int (*mdio_read)(struct net_device *dev, int prtad, int devad,
+ u16 addr);
+ int (*mdio_write)(struct net_device *dev, int prtad, int devad,
+ u16 addr, u16 val);
+};
+
+#define MDIO_PRTAD_NONE (-1)
+#define MDIO_DEVAD_NONE (-1)
+#define MDIO_EMULATE_C22 4
+
+struct ethtool_cmd;
+struct ethtool_pauseparam;
+extern int mdio45_probe(struct mdio_if_info *mdio, int prtad);
+extern int mdio_set_flag(const struct mdio_if_info *mdio,
+ int prtad, int devad, u16 addr, int mask,
+ bool sense);
+extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds);
+extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
+extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
+ struct ethtool_cmd *ecmd,
+ u32 npage_adv, u32 npage_lpa);
+extern void
+mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
+ const struct ethtool_pauseparam *ecmd);
+
+/**
+ * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
+ * @mdio: MDIO interface
+ * @ecmd: Ethtool request structure
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them. Use
+ * mdio45_ethtool_gset_npage() to specify advertisement bits from next
+ * pages.
+ */
+static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
+ struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
+}
+
+extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
+ struct mii_ioctl_data *mii_data, int cmd);
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_MDIO_H__ */
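For context, and not part of the patch: a small sketch of the clause-45 PHY ID helpers defined above, packing a port address and MMD into the mii_ioctl_data::phy_id encoding and unpacking it again. The values are arbitrary.

#include <linux/mdio.h>

static int example_phy_id_roundtrip(void)
{
	int prtad = 5;                /* port address on the MDIO bus */
	int devad = MDIO_MMD_PMAPMD;  /* MMD (device) address */
	__u16 phy_id = mdio_phy_id_c45(prtad, devad);

	/* The encoded value carries the C45 flag plus both addresses. */
	if (!mdio_phy_id_is_c45(phy_id))
		return -1;
	if (mdio_phy_id_prtad(phy_id) != prtad)
		return -1;
	if (mdio_phy_id_devad(phy_id) != devad)
		return -1;
	return 0;
}

As the kernel-doc above describes, a driver embedding struct mdio_if_info fills in prtad, mmds, mode_support and the two accessor callbacks, then hands the structure to helpers such as mdio45_probe() and mdio45_links_ok().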
diff --git a/libdde-linux26/contrib/include/net/ethoc.h b/libdde-linux26/contrib/include/net/ethoc.h
new file mode 100644
index 0000000..96f3789
--- /dev/null
+++ b/libdde-linux26/contrib/include/net/ethoc.h
@@ -0,0 +1,22 @@
+/*
+ * linux/include/net/ethoc.h
+ *
+ * Copyright (C) 2008-2009 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by Thierry Reding <address@hidden>
+ */
+
+#ifndef LINUX_NET_ETHOC_H
+#define LINUX_NET_ETHOC_H 1
+
+struct ethoc_platform_data {
+ u8 hwaddr[IFHWADDRLEN];
+ s8 phy_id;
+};
+
+#endif /* !LINUX_NET_ETHOC_H */
+
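For context, and not part of the patch: a sketch of how a board file might hand ethoc_platform_data to the OpenCores Ethernet driver. The MAC address and PHY address are made-up values, and the platform device name "ethoc" is an assumption here.

#include <linux/if.h>              /* IFHWADDRLEN */
#include <linux/platform_device.h>
#include <net/ethoc.h>

static struct ethoc_platform_data example_ethoc_pdata = {
	/* Locally administered MAC address (made up for the example). */
	.hwaddr = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 },
	.phy_id = 1,   /* assumed MDIO address of the attached PHY */
};

static struct platform_device example_ethoc_device = {
	.name = "ethoc",               /* assumed driver name */
	.id   = -1,
	.dev  = {
		.platform_data = &example_ethoc_pdata,
	},
};

/* A board setup routine would then call:
 *	platform_device_register(&example_ethoc_device);
 */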
diff --git a/libdde-linux26/contrib/kernel/rcuclassic.c b/libdde-linux26/contrib/kernel/rcuclassic.c
new file mode 100644
index 0000000..654c640
--- /dev/null
+++ b/libdde-linux26/contrib/kernel/rcuclassic.c
@@ -0,0 +1,788 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2001
+ *
+ * Authors: Dipankar Sarma <address@hidden>
+ * Manfred Spraul <address@hidden>
+ *
+ * Based on the original work by Paul McKenney <address@hidden>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+ STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
+
+/* Definition for rcupdate control block. */
+static struct rcu_ctrlblk rcu_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .pending = -300,
+ .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
+ .cpumask = CPU_BITS_NONE,
+};
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .pending = -300,
+ .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
+ .cpumask = CPU_BITS_NONE,
+};
+
+DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
+
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ int cpu;
+ unsigned long flags;
+
+ set_need_resched();
+ spin_lock_irqsave(&rcp->lock, flags);
+ if (unlikely(!rcp->signaled)) {
+ rcp->signaled = 1;
+ /*
+ * Don't send IPI to itself. With irqs disabled,
+ * rdp->cpu is the current cpu.
+ *
+ * cpu_online_mask is updated by the _cpu_down()
+ * using __stop_machine(). Since we're in irqs disabled
+ * section, __stop_machine() is not executing, hence
+ * the cpu_online_mask is stable.
+ *
+ * However, a cpu might have been offlined _just_ before
+ * we disabled irqs while entering here.
+ * And rcu subsystem might not yet have handled the CPU_DEAD
+ * notification, leading to the offlined cpu's bit
+ * being set in the rcp->cpumask.
+ *
+ * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
+ * sending smp_send_reschedule() to an offlined CPU.
+ */
+ for_each_cpu_and(cpu,
+ to_cpumask(rcp->cpumask), cpu_online_mask) {
+ if (cpu != rdp->cpu)
+ smp_send_reschedule(cpu);
+ }
+ }
+ spin_unlock_irqrestore(&rcp->lock, flags);
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ set_need_resched();
+}
+#endif
+
+static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ long batch;
+
+ head->next = NULL;
+ smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
+
+ /*
+ * Determine the batch number of this callback.
+ *
+ * ACCESS_ONCE is used to avoid the following problem when gcc eliminates
+ * the local variable "batch" and emits code like this:
+ * 1) rdp->batch = rcp->cur + 1 # gets old value
+ * ......
+ * 2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
+ * then [*nxttail[0], *nxttail[1]) may contain callbacks
+ * whose batch# = rdp->batch; see the comment of struct rcu_data.
+ */
+ batch = ACCESS_ONCE(rcp->cur) + 1;
+
+ if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
+ /* process callbacks */
+ rdp->nxttail[0] = rdp->nxttail[1];
+ rdp->nxttail[1] = rdp->nxttail[2];
+ if (rcu_batch_after(batch - 1, rdp->batch))
+ rdp->nxttail[0] = rdp->nxttail[2];
+ }
+
+ rdp->batch = batch;
+ *rdp->nxttail[2] = head;
+ rdp->nxttail[2] = &head->next;
+
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_ctrlblk);
+ }
+}
+
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+
+static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
+{
+ rcp->gp_start = jiffies;
+ rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
+}
+
+static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+ int cpu;
+ long delta;
+ unsigned long flags;
+
+ /* Only let one CPU complain about others per time interval. */
+
+ spin_lock_irqsave(&rcp->lock, flags);
+ delta = jiffies - rcp->jiffies_stall;
+ if (delta < 2 || rcp->cur != rcp->completed) {
+ spin_unlock_irqrestore(&rcp->lock, flags);
+ return;
+ }
+ rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+ spin_unlock_irqrestore(&rcp->lock, flags);
+
+ /* OK, time to rat on our buddy... */
+
+ printk(KERN_ERR "INFO: RCU detected CPU stalls:");
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
+ printk(" %d", cpu);
+ }
+ printk(" (detected by %d, t=%ld jiffies)\n",
+ smp_processor_id(), (long)(jiffies - rcp->gp_start));
+}
+
+static void print_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+ unsigned long flags;
+
+ printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
+ smp_processor_id(), jiffies,
+ jiffies - rcp->gp_start);
+ dump_stack();
+ spin_lock_irqsave(&rcp->lock, flags);
+ if ((long)(jiffies - rcp->jiffies_stall) >= 0)
+ rcp->jiffies_stall =
+ jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+ spin_unlock_irqrestore(&rcp->lock, flags);
+ set_need_resched(); /* kick ourselves to get things going. */
+}
+
+static void check_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+ long delta;
+
+ delta = jiffies - rcp->jiffies_stall;
+ if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
+ delta >= 0) {
+
+ /* We haven't checked in, so go dump stack. */
+ print_cpu_stall(rcp);
+
+ } else if (rcp->cur != rcp->completed && delta >= 2) {
+
+ /* They had two seconds to dump stack, so complain. */
+ print_other_cpu_stall(rcp);
+ }
+}
+
+#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
+{
+}
+
+static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/**
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+
+ head->func = func;
+ local_irq_save(flags);
+ __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/**
+ * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by rcu_read_lock() and
+ * rcu_read_unlock() if in interrupt context, or rcu_read_lock_bh()
+ * and rcu_read_unlock_bh() if in process context. These may be nested.
+ */
+void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+
+ head->func = func;
+ local_irq_save(flags);
+ __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu_bh);
+
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed(void)
+{
+ return rcu_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed_bh(void)
+{
+ return rcu_bh_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+
+/* Raises the softirq for processing rcu_callbacks. */
+static inline void raise_rcu_softirq(void)
+{
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Invoke the completed RCU callbacks. They are expected to be in
+ * a per-cpu list.
+ */
+static void rcu_do_batch(struct rcu_data *rdp)
+{
+ unsigned long flags;
+ struct rcu_head *next, *list;
+ int count = 0;
+
+ list = rdp->donelist;
+ while (list) {
+ next = list->next;
+ prefetch(next);
+ list->func(list);
+ list = next;
+ if (++count >= rdp->blimit)
+ break;
+ }
+ rdp->donelist = list;
+
+ local_irq_save(flags);
+ rdp->qlen -= count;
+ local_irq_restore(flags);
+ if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+ rdp->blimit = blimit;
+
+ if (!rdp->donelist)
+ rdp->donetail = &rdp->donelist;
+ else
+ raise_rcu_softirq();
+}
+
+/*
+ * Grace period handling:
+ * The grace period handling consists of two steps:
+ * - A new grace period is started.
+ * This is done by rcu_start_batch. The start is not broadcasted to
+ * all cpus, they must pick this up by comparing rcp->cur with
+ * rdp->quiescbatch. All cpus are recorded in the
+ * rcu_ctrlblk.cpumask bitmap.
+ * - All cpus must go through a quiescent state.
+ * Since the start of the grace period is not broadcasted, at least two
+ * calls to rcu_check_quiescent_state are required:
+ * The first call just notices that a new grace period is running. The
+ * following calls check if there was a quiescent state since the beginning
+ * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
+ * the bitmap is empty, then the grace period is completed.
+ * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
+ * period (if necessary).
+ */
+
+/*
+ * Register a new batch of callbacks, and start it up if there is currently no
+ * active batch and the batch to be registered has not already occurred.
+ * Caller must hold rcu_ctrlblk.lock.
+ */
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
+{
+ if (rcp->cur != rcp->pending &&
+ rcp->completed == rcp->cur) {
+ rcp->cur++;
+ record_gp_stall_check_time(rcp);
+
+ /*
+ * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
+ * barrier. Otherwise it can cause tickless idle CPUs to be
+ * included in rcp->cpumask, which will extend grace periods
+ * unnecessarily.
+ */
+ smp_mb();
+ cpumask_andnot(to_cpumask(rcp->cpumask),
+ cpu_online_mask, nohz_cpu_mask);
+
+ rcp->signaled = 0;
+ }
+}
+
+/*
+ * A cpu went through a quiescent state since the beginning of the grace period.
+ * Clear it from the cpu mask and complete the grace period if it was the last
+ * cpu. Start another grace period if someone has further entries pending.
+ */
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
+{
+ cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
+ if (cpumask_empty(to_cpumask(rcp->cpumask))) {
+ /* batch completed ! */
+ rcp->completed = rcp->cur;
+ rcu_start_batch(rcp);
+ }
+}
+
+/*
+ * Check if the cpu has gone through a quiescent state (say context
+ * switch). If so, and if it hasn't already done so in this RCU
+ * quiescent cycle, then indicate that it has done so.
+ */
+static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ unsigned long flags;
+
+ if (rdp->quiescbatch != rcp->cur) {
+ /* start new grace period: */
+ rdp->qs_pending = 1;
+ rdp->passed_quiesc = 0;
+ rdp->quiescbatch = rcp->cur;
+ return;
+ }
+
+ /* Grace period already completed for this cpu?
+ * qs_pending is checked instead of the actual bitmap to avoid
+ * cacheline thrashing.
+ */
+ if (!rdp->qs_pending)
+ return;
+
+ /*
+ * Was there a quiescent state since the beginning of the grace
+ * period? If no, then exit and wait for the next call.
+ */
+ if (!rdp->passed_quiesc)
+ return;
+ rdp->qs_pending = 0;
+
+ spin_lock_irqsave(&rcp->lock, flags);
+ /*
+ * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
+ * during cpu startup. Ignore the quiescent state.
+ */
+ if (likely(rdp->quiescbatch == rcp->cur))
+ cpu_quiet(rdp->cpu, rcp);
+
+ spin_unlock_irqrestore(&rcp->lock, flags);
+}
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* Warning! Helper for rcu_offline_cpu; do not use elsewhere without reviewing
+ * the locking requirements. The list it is pulling from has to belong to a cpu
+ * which is dead and hence not processing interrupts.
+ */
+static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
+ struct rcu_head **tail, long batch)
+{
+ unsigned long flags;
+
+ if (list) {
+ local_irq_save(flags);
+ this_rdp->batch = batch;
+ *this_rdp->nxttail[2] = list;
+ this_rdp->nxttail[2] = tail;
+ local_irq_restore(flags);
+ }
+}
+
+static void __rcu_offline_cpu(struct rcu_data *this_rdp,
+ struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+ unsigned long flags;
+
+ /*
+ * if the cpu going offline owns the grace period
+ * we can block indefinitely waiting for it, so flush
+ * it here
+ */
+ spin_lock_irqsave(&rcp->lock, flags);
+ if (rcp->cur != rcp->completed)
+ cpu_quiet(rdp->cpu, rcp);
+ rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
+ rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
+ spin_unlock(&rcp->lock);
+
+ this_rdp->qlen += rdp->qlen;
+ local_irq_restore(flags);
+}
+
+static void rcu_offline_cpu(int cpu)
+{
+ struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
+ struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
+
+ __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
+ &per_cpu(rcu_data, cpu));
+ __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
+ &per_cpu(rcu_bh_data, cpu));
+ put_cpu_var(rcu_data);
+ put_cpu_var(rcu_bh_data);
+}
+
+#else
+
+static void rcu_offline_cpu(int cpu)
+{
+}
+
+#endif
+
+/*
+ * This does the RCU processing work from softirq context.
+ */
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ unsigned long flags;
+ long completed_snap;
+
+ if (rdp->nxtlist) {
+ local_irq_save(flags);
+ completed_snap = ACCESS_ONCE(rcp->completed);
+
+ /*
+ * move the other grace-period-completed entries to
+ * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
+ */
+ if (!rcu_batch_before(completed_snap, rdp->batch))
+ rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
+ else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
+ rdp->nxttail[0] = rdp->nxttail[1];
+
+ /*
+ * the grace period for entries in
+ * [rdp->nxtlist, *rdp->nxttail[0]) has completed and
+ * move these entries to donelist
+ */
+ if (rdp->nxttail[0] != &rdp->nxtlist) {
+ *rdp->donetail = rdp->nxtlist;
+ rdp->donetail = rdp->nxttail[0];
+ rdp->nxtlist = *rdp->nxttail[0];
+ *rdp->donetail = NULL;
+
+ if (rdp->nxttail[1] == rdp->nxttail[0])
+ rdp->nxttail[1] = &rdp->nxtlist;
+ if (rdp->nxttail[2] == rdp->nxttail[0])
+ rdp->nxttail[2] = &rdp->nxtlist;
+ rdp->nxttail[0] = &rdp->nxtlist;
+ }
+
+ local_irq_restore(flags);
+
+ if (rcu_batch_after(rdp->batch, rcp->pending)) {
+ unsigned long flags2;
+
+ /* and start it/schedule start if it's a new batch */
+ spin_lock_irqsave(&rcp->lock, flags2);
+ if (rcu_batch_after(rdp->batch, rcp->pending)) {
+ rcp->pending = rdp->batch;
+ rcu_start_batch(rcp);
+ }
+ spin_unlock_irqrestore(&rcp->lock, flags2);
+ }
+ }
+
+ rcu_check_quiescent_state(rcp, rdp);
+ if (rdp->donelist)
+ rcu_do_batch(rdp);
+}
+
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+ /*
+ * Memory references from any prior RCU read-side critical sections
+ * executed by the interrupted code must be seen before any RCU
+ * grace-period manipulations below.
+ */
+
+ smp_mb(); /* See above block comment. */
+
+ __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+ __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+
+ /*
+ * Memory references from any later RCU read-side critical sections
+ * executed by the interrupted code must be seen after any RCU
+ * grace-period manipulations above.
+ */
+
+ smp_mb(); /* See above block comment. */
+}
+
+static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+ /* Check for CPU stalls, if enabled. */
+ check_cpu_stall(rcp);
+
+ if (rdp->nxtlist) {
+ long completed_snap = ACCESS_ONCE(rcp->completed);
+
+ /*
+ * This cpu has pending rcu entries and the grace period
+ * for them has completed.
+ */
+ if (!rcu_batch_before(completed_snap, rdp->batch))
+ return 1;
+ if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
+ rdp->nxttail[0] != rdp->nxttail[1])
+ return 1;
+ if (rdp->nxttail[0] != &rdp->nxtlist)
+ return 1;
+
+ /*
+ * This cpu has pending rcu entries and the new batch
+ * for them hasn't been started nor scheduled to start.
+ */
+ if (rcu_batch_after(rdp->batch, rcp->pending))
+ return 1;
+ }
+
+ /* This cpu has finished callbacks to invoke */
+ if (rdp->donelist)
+ return 1;
+
+ /* The rcu core waits for a quiescent state from the cpu */
+ if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
+ return 1;
+
+ /* nothing to do */
+ return 0;
+}
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so. This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
+int rcu_pending(int cpu)
+{
+ return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
+ __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
+}
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so. This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
+
+ return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
+}
+
+/*
+ * Top-level function driving RCU grace-period detection, normally
+ * invoked from the scheduler-clock interrupt. This function simply
+ * increments counters that are read only from softirq by this same
+ * CPU, so there are no memory barriers required.
+ */
+void rcu_check_callbacks(int cpu, int user)
+{
+ if (user ||
+ (idle_cpu(cpu) && rcu_scheduler_active &&
+ !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+ /*
+ * Get here if this CPU took its interrupt from user
+ * mode or from the idle loop, and if this is not a
+ * nested interrupt. In this case, the CPU is in
+ * a quiescent state, so count it.
+ *
+ * Also do a memory barrier. This is needed to handle
+ * the case where writes from a preempt-disable section
+ * of code get reordered into schedule() by this CPU's
+ * write buffer. The memory barrier makes sure that
+ * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+ * by other CPUs to happen after any such write.
+ */
+
+ smp_mb(); /* See above block comment. */
+ rcu_qsctr_inc(cpu);
+ rcu_bh_qsctr_inc(cpu);
+
+ } else if (!in_softirq()) {
+
+ /*
+ * Get here if this CPU did not take its interrupt from
+ * softirq, in other words, if it is not interrupting
+ * a rcu_bh read-side critical section. This is an _bh
+ * critical section, so count it. The memory barrier
+ * is needed for the same reason as is the above one.
+ */
+
+ smp_mb(); /* See above block comment. */
+ rcu_bh_qsctr_inc(cpu);
+ }
+ raise_rcu_softirq();
+}
+
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rcp->lock, flags);
+ memset(rdp, 0, sizeof(*rdp));
+ rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
+ rdp->donetail = &rdp->donelist;
+ rdp->quiescbatch = rcp->completed;
+ rdp->qs_pending = 0;
+ rdp->cpu = cpu;
+ rdp->blimit = blimit;
+ spin_unlock_irqrestore(&rcp->lock, flags);
+}
+
+static void __cpuinit rcu_online_cpu(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
+
+ rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+ rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ rcu_online_cpu(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ rcu_offline_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata rcu_nb = {
+ .notifier_call = rcu_cpu_notify,
+};
+
+/*
+ * Initializes rcu mechanism. Assumed to be called early.
+ * That is, before the local timer (SMP) or jiffy timer (uniproc) is set up.
+ * Note that rcu_qsctr and friends are implicitly
+ * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
+ */
+void __init __rcu_init(void)
+{
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+ printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+ rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ /* Register notifier for non-boot CPUs */
+ register_cpu_notifier(&rcu_nb);
+}
+
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
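For context, and not part of the patch: the usual pattern for the call_rcu() interface implemented above, deferring a kfree() until a grace period has elapsed. The structure and function names are hypothetical.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_item {
	int value;
	struct rcu_head rcu;   /* embedded so call_rcu() can queue us */
};

/* Invoked from softirq context after a full grace period: no reader
 * that entered rcu_read_lock() before the update can still hold a
 * reference to the item. */
static void example_item_free_rcu(struct rcu_head *head)
{
	struct example_item *item =
		container_of(head, struct example_item, rcu);

	kfree(item);
}

static void example_item_retire(struct example_item *item)
{
	/* Unlink "item" from its data structure first (not shown),
	 * then queue the callback. */
	call_rcu(&item->rcu, example_item_free_rcu);
}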
diff --git a/libdde-linux26/contrib/lib/devres.c b/libdde-linux26/contrib/lib/devres.c
new file mode 100644
index 0000000..72c8909
--- /dev/null
+++ b/libdde-linux26/contrib/lib/devres.c
@@ -0,0 +1,351 @@
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+void devm_ioremap_release(struct device *dev, void *res)
+{
+ iounmap(*(void __iomem **)res);
+}
+
+static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+ unsigned long size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioremap(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioremap);
+
+/**
+ * devm_ioremap_nocache - Managed ioremap_nocache()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap_nocache(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
+ unsigned long size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioremap_nocache(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioremap_nocache);
+
+/**
+ * devm_iounmap - Managed iounmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
+ */
+void devm_iounmap(struct device *dev, void __iomem *addr)
+{
+ iounmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+ (void *)addr));
+}
+EXPORT_SYMBOL(devm_iounmap);
+
+#ifdef CONFIG_HAS_IOPORT
+/*
+ * Generic iomap devres
+ */
+static void devm_ioport_map_release(struct device *dev, void *res)
+{
+ ioport_unmap(*(void __iomem **)res);
+}
+
+static int devm_ioport_map_match(struct device *dev, void *res,
+ void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioport_map - Managed ioport_map()
+ * @dev: Generic device to map ioport for
+ * @port: Port to map
+ * @nr: Number of ports to map
+ *
+ * Managed ioport_map(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+ unsigned int nr)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioport_map(port, nr);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioport_map);
+
+/**
+ * devm_ioport_unmap - Managed ioport_unmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed ioport_unmap(). @addr must have been mapped using
+ * devm_ioport_map().
+ */
+void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+ ioport_unmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+ devm_ioport_map_match, (void *)addr));
+}
+EXPORT_SYMBOL(devm_ioport_unmap);
+
+#ifdef CONFIG_PCI
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
+
+struct pcim_iomap_devres {
+ void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+ struct pcim_iomap_devres *this = res;
+ int i;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (this->table[i])
+ pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access iomap allocation table for @pdev. If iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but can
+ * be safely called without context and guaranteed to succeed once
+ * allocated.
+ */
+void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+ if (dr)
+ return dr->table;
+
+ new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+ if (!new_dr)
+ return NULL;
+ dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+ return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pcim_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ void __iomem **tbl;
+
+ BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ return NULL;
+
+ tbl[bar] = pci_iomap(pdev, bar, maxlen);
+ return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ void __iomem **tbl;
+ int i;
+
+ pci_iounmap(pdev, addr);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ BUG_ON(!tbl);
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (tbl[i] == addr) {
+ tbl[i] = NULL;
+ return;
+ }
+ WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
+{
+ void __iomem * const *iomap;
+ int i, rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ unsigned long len;
+
+ if (!(mask & (1 << i)))
+ continue;
+
+ rc = -EINVAL;
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ goto err_inval;
+
+ rc = pci_request_region(pdev, i, name);
+ if (rc)
+ goto err_inval;
+
+ rc = -ENOMEM;
+ if (!pcim_iomap(pdev, i, 0))
+ goto err_region;
+ }
+
+ return 0;
+
+ err_region:
+ pci_release_region(pdev, i);
+ err_inval:
+ while (--i >= 0) {
+ if (!(mask & (1 << i)))
+ continue;
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
+
+/**
+ * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to iomap
+ * @name: Name used when requesting regions
+ *
+ * Request all PCI BARs and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask,
+ const char *name)
+{
+ int request_mask = ((1 << 6) - 1) & ~mask;
+ int rc;
+
+ rc = pci_request_selected_regions(pdev, request_mask, name);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, mask, name);
+ if (rc)
+ pci_release_selected_regions(pdev, request_mask);
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions_request_all);
+
+/**
+ * pcim_iounmap_regions - Unmap and release PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to unmap and release
+ *
+ * Unmap and release regions specified by @mask.
+ */
+void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
+{
+ void __iomem * const *iomap;
+ int i;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+}
+EXPORT_SYMBOL(pcim_iounmap_regions);
+#endif
+#endif
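For context, and not part of the patch: a sketch of a PCI probe routine built on the managed helpers above, so that the BAR mapping and region request are released automatically on driver detach or on a later probe failure. The driver name and BAR number are assumptions.

#include <linux/io.h>
#include <linux/pci.h>

#define EXAMPLE_BAR 0   /* assumption: the device's registers live in BAR 0 */

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int rc;

	/* Managed enable: undone automatically on detach/failure. */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Request and iomap BAR 0 in one step; both are managed. */
	rc = pcim_iomap_regions(pdev, 1 << EXAMPLE_BAR, "example");
	if (rc)
		return rc;

	regs = pcim_iomap_table(pdev)[EXAMPLE_BAR];

	/* ... program the device through "regs" ... */
	(void)regs;

	return 0;
}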
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-hurd/hurd.git