Commit 4e9a35d3 authored by Stephen Boyd's avatar Stephen Boyd
Browse files

Merge tag 'ti-clk-for-5.1' of...

Merge tag 'ti-clk-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kristo/linux into clk-ti

Pull TI clock driver updates from Tero Kristo:

Contains the CLK_IS_BASIC flag cleanup, getting rid of the flag
completely from TI clock driver.

Also contains the autoidle clock support series from Andreas Kemnade,
which is needed for handling the hdq clocks properly during low-power
states, at least on the gta04 board.

* tag 'ti-clk-for-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/kristo/linux:
  ARM: OMAP2+: hwmod: disable ick autoidling when a hwmod requires that
  clk: ti: check clock type before doing autoidle ops
  clk: ti: add a usecount for autoidle
  clk: ti: generalize the init sequence of clk_hw_omap clocks
  clk: ti: remove usage of CLK_IS_BASIC
  clk: ti: add new API for checking if a provided clock is an OMAP clock
  clk: ti: move clk_hw_omap list handling under generic part of the driver
parents bfeffd15 12af39ca
Loading
Loading
Loading
Loading
+12 −4
Original line number Diff line number Diff line
@@ -1002,9 +1002,11 @@ static int _enable_clocks(struct omap_hwmod *oh)
		clk_enable(oh->_clk);

	list_for_each_entry(os, &oh->slave_ports, node) {
		if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE))
		if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
			omap2_clk_deny_idle(os->_clk);
			clk_enable(os->_clk);
		}
	}

	/* The opt clocks are controlled by the device driver. */

@@ -1055,8 +1057,10 @@ static int _disable_clocks(struct omap_hwmod *oh)
		clk_disable(oh->_clk);

	list_for_each_entry(os, &oh->slave_ports, node) {
		if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE))
		if (os->_clk && (os->flags & OCPIF_SWSUP_IDLE)) {
			clk_disable(os->_clk);
			omap2_clk_allow_idle(os->_clk);
		}
	}

	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
@@ -2436,9 +2440,13 @@ static void _setup_iclk_autoidle(struct omap_hwmod *oh)
			continue;

		if (os->flags & OCPIF_SWSUP_IDLE) {
			/* XXX omap_iclk_deny_idle(c); */
			/*
			 * we might have multiple users of one iclk with
			 * different requirements, disable autoidle when
			 * the module is enabled, e.g. dss iclk
			 */
		} else {
			/* XXX omap_iclk_allow_idle(c); */
			/* we are enabling autoidle afterwards anyways */
			clk_enable(os->_clk);
		}
	}
+1 −1
Original line number Diff line number Diff line
@@ -614,7 +614,7 @@ static int ti_adpll_init_clkout(struct ti_adpll_data *d,

	init.name = child_name;
	init.ops = ops;
	init.flags = CLK_IS_BASIC;
	init.flags = 0;
	co->hw.init = &init;
	parent_names[0] = __clk_get_name(clk0);
	parent_names[1] = __clk_get_name(clk1);
+2 −2
Original line number Diff line number Diff line
@@ -165,7 +165,7 @@ static void __init omap_clk_register_apll(void *user,

	ad->clk_bypass = __clk_get_hw(clk);

	clk = ti_clk_register(NULL, &clk_hw->hw, node->name);
	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(clk_hw->hw.init->parent_names);
@@ -402,7 +402,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
	if (ret)
		goto cleanup;

	clk = clk_register(NULL, &clk_hw->hw);
	clk = ti_clk_register_omap_hw(NULL, &clk_hw->hw, node->name);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init);
+62 −39
Original line number Diff line number Diff line
@@ -35,7 +35,44 @@ struct clk_ti_autoidle {
#define AUTOIDLE_LOW		0x1

static LIST_HEAD(autoidle_clks);
static LIST_HEAD(clk_hw_omap_clocks);

/*
 * we have some non-atomic read/write
 * operations behind it, so lets
 * take one lock for handling autoidle
 * of all clocks
 */
static DEFINE_SPINLOCK(autoidle_spinlock);

static int _omap2_clk_deny_idle(struct clk_hw_omap *clk)
{
	unsigned long flags;

	/* Clocks without deny_idle support need no bookkeeping. */
	if (!clk->ops || !clk->ops->deny_idle)
		return 0;

	spin_lock_irqsave(&autoidle_spinlock, flags);
	/* Only the first user actually disables autoidle in hardware. */
	if (++clk->autoidle_count == 1)
		clk->ops->deny_idle(clk);
	spin_unlock_irqrestore(&autoidle_spinlock, flags);

	return 0;
}

static int _omap2_clk_allow_idle(struct clk_hw_omap *clk)
{
	unsigned long flags;

	/* Clocks without allow_idle support need no bookkeeping. */
	if (!clk->ops || !clk->ops->allow_idle)
		return 0;

	spin_lock_irqsave(&autoidle_spinlock, flags);
	/* Re-enable autoidle in hardware only when the last user leaves. */
	if (--clk->autoidle_count == 0)
		clk->ops->allow_idle(clk);
	spin_unlock_irqrestore(&autoidle_spinlock, flags);

	return 0;
}

/**
 * omap2_clk_deny_idle - disable autoidle on an OMAP clock
@@ -45,12 +82,15 @@ static LIST_HEAD(clk_hw_omap_clocks);
 */
int omap2_clk_deny_idle(struct clk *clk)
{
	struct clk_hw_omap *c;
	struct clk_hw *hw = __clk_get_hw(clk);

	c = to_clk_hw_omap(__clk_get_hw(clk));
	if (c->ops && c->ops->deny_idle)
		c->ops->deny_idle(c);
	return 0;
	if (omap2_clk_is_hw_omap(hw)) {
		struct clk_hw_omap *c = to_clk_hw_omap(hw);

		return _omap2_clk_deny_idle(c);
	}

	return -EINVAL;
}

/**
@@ -61,12 +101,15 @@ int omap2_clk_deny_idle(struct clk *clk)
 */
int omap2_clk_allow_idle(struct clk *clk)
{
	struct clk_hw_omap *c;
	struct clk_hw *hw = __clk_get_hw(clk);

	c = to_clk_hw_omap(__clk_get_hw(clk));
	if (c->ops && c->ops->allow_idle)
		c->ops->allow_idle(c);
	return 0;
	if (omap2_clk_is_hw_omap(hw)) {
		struct clk_hw_omap *c = to_clk_hw_omap(hw);

		return _omap2_clk_allow_idle(c);
	}

	return -EINVAL;
}

static void _allow_autoidle(struct clk_ti_autoidle *clk)
@@ -167,26 +210,6 @@ int __init of_ti_clk_autoidle_setup(struct device_node *node)
	return 0;
}

/**
 * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
 * @hw: struct clk_hw * to initialize
 *
 * Add an OMAP clock @clk to the internal list of OMAP clocks.  Used
 * temporarily for autoidle handling, until this support can be
 * integrated into the common clock framework code in some way.  No
 * return value.
 */
void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
{
	struct clk_hw_omap *c;

	if (clk_hw_get_flags(hw) & CLK_IS_BASIC)
		return;

	c = to_clk_hw_omap(hw);
	list_add(&c->node, &clk_hw_omap_clocks);
}

/**
 * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
 * support it
@@ -198,11 +221,11 @@ void omap2_init_clk_hw_omap_clocks(struct clk_hw *hw)
 */
int omap2_clk_enable_autoidle_all(void)
{
	struct clk_hw_omap *c;
	int ret;

	list_for_each_entry(c, &clk_hw_omap_clocks, node)
		if (c->ops && c->ops->allow_idle)
			c->ops->allow_idle(c);
	ret = omap2_clk_for_each(_omap2_clk_allow_idle);
	if (ret)
		return ret;

	_clk_generic_allow_autoidle_all();

@@ -220,11 +243,11 @@ int omap2_clk_enable_autoidle_all(void)
 */
int omap2_clk_disable_autoidle_all(void)
{
	struct clk_hw_omap *c;
	int ret;

	list_for_each_entry(c, &clk_hw_omap_clocks, node)
		if (c->ops && c->ops->deny_idle)
			c->ops->deny_idle(c);
	ret = omap2_clk_for_each(_omap2_clk_deny_idle);
	if (ret)
		return ret;

	_clk_generic_deny_autoidle_all();

+72 −0
Original line number Diff line number Diff line
@@ -31,6 +31,7 @@
#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

static LIST_HEAD(clk_hw_omap_clocks);
struct ti_clk_ll_ops *ti_clk_ll_ops;
static struct device_node *clocks_node_ptr[CLK_MAX_MEMMAPS];

@@ -517,3 +518,74 @@ struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,

	return clk;
}

/**
 * ti_clk_register_omap_hw - register a clk_hw_omap to the clock framework
 * @dev: device for this clock
 * @hw: hardware clock handle
 * @con: connection ID for this clock
 *
 * Registers a clk_hw_omap clock to the clock framework, adds a clock
 * alias for it, and adds the clock to the internal list of available
 * clk_hw_omap type clocks.  Returns a handle to the registered clock
 * if successful, ERR_PTR value in failure.
 */
struct clk *ti_clk_register_omap_hw(struct device *dev, struct clk_hw *hw,
				    const char *con)
{
	struct clk *clk;
	struct clk_hw_omap *oclk;

	clk = ti_clk_register(dev, hw, con);
	if (IS_ERR(clk))
		return clk;

	oclk = to_clk_hw_omap(hw);

	/* Track this clock so omap2_clk_for_each()/omap2_clk_is_hw_omap()
	 * can find it later.
	 */
	list_add(&oclk->node, &clk_hw_omap_clocks);

	return clk;
}

/**
 * omap2_clk_for_each - call function for each registered clk_hw_omap
 * @fn: pointer to a callback function
 *
 * Call @fn for each registered clk_hw_omap, passing @hw to each
 * function.  @fn must return 0 for success or any other value for
 * failure.  If @fn returns non-zero, the iteration across clocks
 * will stop and the non-zero return value will be passed to the
 * caller of omap2_clk_for_each().  Returns 0 if the list is empty.
 */
int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw))
{
	/*
	 * Initialize to 0: with an empty clk_hw_omap_clocks list the
	 * loop body never runs, and returning an uninitialized value
	 * is undefined behavior.
	 */
	int ret = 0;
	struct clk_hw_omap *hw;

	list_for_each_entry(hw, &clk_hw_omap_clocks, node) {
		ret = (*fn)(hw);
		if (ret)
			break;
	}

	return ret;
}

/**
 * omap2_clk_is_hw_omap - check if the provided clk_hw is OMAP clock
 * @hw: clk_hw to check if it is an omap clock or not
 *
 * Checks if the provided clk_hw is OMAP clock or not. Returns true if
 * it is, false otherwise.
 */
bool omap2_clk_is_hw_omap(struct clk_hw *hw)
{
	struct clk_hw_omap *oclk;

	list_for_each_entry(oclk, &clk_hw_omap_clocks, node) {
		if (&oclk->hw == hw)
			return true;
	}

	return false;
}
Loading